You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/03 04:41:48 UTC
[01/13] lucenenet git commit: Lucene.Net.Analysis.Da refactor: member
accessibility and documentation comments
Repository: lucenenet
Updated Branches:
refs/heads/api-work 816f0c9b4 -> 20087487f
Lucene.Net.Analysis.Da refactor: member accessibility and documentation comments
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/363ea8ea
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/363ea8ea
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/363ea8ea
Branch: refs/heads/api-work
Commit: 363ea8eaf3cbd40ccb774aeaf7365c15e056e14f
Parents: 816f0c9
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 10:23:28 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 10:23:28 2017 +0700
----------------------------------------------------------------------
.../Analysis/Da/DanishAnalyzer.cs | 26 ++++++++++----------
1 file changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/363ea8ea/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
index 9ff3179..221979b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Da
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Danish.
+ /// <see cref="Analyzer"/> for Danish.
/// </summary>
public sealed class DanishAnalyzer : StopwordAnalyzerBase
{
@@ -51,7 +51,7 @@ namespace Lucene.Net.Analysis.Da
}
/// <summary>
- /// Atomically loads the DEFAULT_STOP_SET in a lazy fashion once the outer class
+ /// Atomically loads the <see cref="DEFAULT_STOP_SET"/> in a lazy fashion once the outer class
/// accesses the static final set the first time.
/// </summary>
private class DefaultSetHolder
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Da
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public DanishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Da
/// <summary>
/// Builds an analyzer with the given stop words.
/// </summary>
- /// <param name="matchVersion"> lucene compatibility version </param>
+ /// <param name="matchVersion"> <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords"> a stopword set </param>
public DanishAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords)
: this(matchVersion, stopwords, CharArraySet.EMPTY_SET)
@@ -98,10 +98,10 @@ namespace Lucene.Net.Analysis.Da
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
- /// <param name="matchVersion"> lucene compatibility version </param>
+ /// <param name="matchVersion"> <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords"> a stopword set </param>
/// <param name="stemExclusionSet"> a set of terms not to be stemmed </param>
public DanishAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords, CharArraySet stemExclusionSet)
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Da
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="TextReader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
+ /// <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
[03/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - <seealso cref="..."/> to <see cref="..."/>,
<pre> to <code>, Analyzer.TokenStreamComponents
Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
index d5fad67..bfa7751 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Removes elisions from a <seealso cref="TokenStream"/>. For example, "l'avion" (the plane) will be
+ /// Removes elisions from a <see cref="TokenStream"/>. For example, "l'avion" (the plane) will be
/// tokenized as "avion" (plane).
/// </summary>
/// <seealso cref= <a href="http://fr.wikipedia.org/wiki/%C3%89lision">Elision in Wikipedia</a> </seealso>
@@ -31,7 +31,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Constructs an elision filter with a Set of stop words </summary>
- /// <param name="input"> the source <seealso cref="TokenStream"/> </param>
+ /// <param name="input"> the source <see cref="TokenStream"/> </param>
/// <param name="articles"> a set of stopword articles </param>
public ElisionFilter(TokenStream input, CharArraySet articles)
: base(input)
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Increments the <seealso cref="TokenStream"/> with a <seealso cref="CharTermAttribute"/> without elisioned start
+ /// Increments the <see cref="TokenStream"/> with a <see cref="CharTermAttribute"/> without elisioned start
/// </summary>
public override bool IncrementToken()
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
index 71c240e..f12f57b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Factory for <seealso cref="ElisionFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ElisionFilter"/>.
+ /// <code>
/// <fieldType name="text_elsn" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Util
/// <filter class="solr.ElisionFilterFactory"
/// articles="stopwordarticles.txt" ignoreCase="true"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ElisionFilterFactory : TokenFilterFactory, IResourceLoaderAware, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
index fb4d438..09aab01 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Simple <seealso cref="ResourceLoader"/> that opens resource files
+ /// Simple <see cref="ResourceLoader"/> that opens resource files
/// from the local file system, optionally resolving against
/// a base directory.
///
- /// <para>This loader wraps a delegate <seealso cref="ResourceLoader"/>
+ /// <para>This loader wraps a delegate <see cref="ResourceLoader"/>
/// that is used to resolve all files, the current base directory
- /// does not contain. <seealso cref="#newInstance"/> is always resolved
- /// against the delegate, as a <seealso cref="ClassLoader"/> is needed.
+ /// does not contain. <see cref="#newInstance"/> is always resolved
+ /// against the delegate, as a <see cref="ClassLoader"/> is needed.
///
/// </para>
/// <para>You can chain several {@code FilesystemResourceLoader}s
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.Util
/// Creates a resource loader that resolves resources against the given
/// base directory (may be {@code null} to refer to CWD).
/// Files not found in file system and class lookups are delegated
- /// to the given delegate <seealso cref="ResourceLoader"/>.
+ /// to the given delegate <see cref="ResourceLoader"/>.
/// </summary>
public FilesystemResourceLoader(DirectoryInfo baseDirectory, IResourceLoader @delegate)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
index 688c890..241c8da 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
@@ -23,11 +23,11 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Abstract base class for TokenFilters that may remove tokens.
- /// You have to implement <seealso cref="#accept"/> and return a boolean if the current
- /// token should be preserved. <seealso cref="#incrementToken"/> uses this method
+ /// You have to implement <see cref="#accept"/> and return a boolean if the current
+ /// token should be preserved. <see cref="#incrementToken"/> uses this method
/// to decide if a token should be passed to the caller.
/// <para><a name="lucene_match_version" />As of Lucene 4.4, an
- /// <seealso cref="IllegalArgumentException"/> is thrown when trying to disable position
+ /// <see cref="IllegalArgumentException"/> is thrown when trying to disable position
/// increments when filtering terms.
/// </para>
/// </summary>
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.Util
private int skippedPositions;
/// <summary>
- /// Create a new <seealso cref="FilteringTokenFilter"/>. </summary>
+ /// Create a new <see cref="FilteringTokenFilter"/>. </summary>
/// <param name="version"> the <a href="#lucene_match_version">Lucene match version</a> </param>
/// <param name="enablePositionIncrements"> whether to increment position increments when filtering out terms </param>
/// <param name="input"> the input to consume </param>
@@ -66,9 +66,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Create a new <seealso cref="FilteringTokenFilter"/>. </summary>
+ /// Create a new <see cref="FilteringTokenFilter"/>. </summary>
/// <param name="version"> the Lucene match version </param>
- /// <param name="in"> the <seealso cref="TokenStream"/> to consume </param>
+ /// <param name="in"> the <see cref="TokenStream"/> to consume </param>
public FilteringTokenFilter(LuceneVersion version, TokenStream @in)
: base(@in)
{
@@ -78,7 +78,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Override this method and return if the current input token should be returned by <seealso cref="#incrementToken"/>. </summary>
+ /// Override this method and return if the current input token should be returned by <see cref="#incrementToken"/>. </summary>
protected abstract bool Accept();
public override sealed bool IncrementToken()
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
index 6f12908..f9c0506 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
@@ -19,7 +19,7 @@
/// <summary>
/// Interface for a component that needs to be initialized by
- /// an implementation of <seealso cref="ResourceLoader"/>.
+ /// an implementation of <see cref="ResourceLoader"/>.
/// </summary>
/// <seealso cref= ResourceLoader </seealso>
public interface IResourceLoaderAware
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
index 073c10b..6bddce4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Util
/// characters that haven't been freed yet. This is like a
/// PushbackReader, except you don't have to specify
/// up-front the max size of the buffer, but you do have to
- /// periodically call <seealso cref="#freeBefore"/>.
+ /// periodically call <see cref="#freeBefore"/>.
/// </summary>
public sealed class RollingCharBuffer
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
index 0a9ad07..c19ace3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Creates a CharArraySet from a file resource associated with a class. (See
- /// <seealso cref="Class#getResourceAsStream(String)"/>).
+ /// <see cref="Class#getResourceAsStream(String)"/>).
/// </summary>
/// <param name="ignoreCase">
/// <code>true</code> if the set should ignore the case of the
@@ -93,7 +93,7 @@ namespace Lucene.Net.Analysis.Util
/// <returns> a CharArraySet containing the distinct stopwords from the given
/// file </returns>
/// <exception cref="IOException">
- /// if loading the stopwords throws an <seealso cref="IOException"/> </exception>
+ /// if loading the stopwords throws an <see cref="IOException"/> </exception>
protected internal static CharArraySet LoadStopwordSet(bool ignoreCase, Type aClass, string resource, string comment)
{
TextReader reader = null;
@@ -124,7 +124,7 @@ namespace Lucene.Net.Analysis.Util
/// <returns> a CharArraySet containing the distinct stopwords from the given
/// file </returns>
/// <exception cref="IOException">
- /// if loading the stopwords throws an <seealso cref="IOException"/> </exception>
+ /// if loading the stopwords throws an <see cref="IOException"/> </exception>
protected internal static CharArraySet LoadStopwordSet(FileInfo stopwords, LuceneVersion matchVersion)
{
TextReader reader = null;
@@ -150,7 +150,7 @@ namespace Lucene.Net.Analysis.Util
/// <returns> a CharArraySet containing the distinct stopwords from the given
/// reader </returns>
/// <exception cref="IOException">
- /// if loading the stopwords throws an <seealso cref="IOException"/> </exception>
+ /// if loading the stopwords throws an <see cref="IOException"/> </exception>
protected internal static CharArraySet LoadStopwordSet(TextReader stopwords, LuceneVersion matchVersion)
{
try
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
index afa0557..b2822d2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Abstract parent class for analysis factories that create <seealso cref="TokenFilter"/>
+ /// Abstract parent class for analysis factories that create <see cref="TokenFilter"/>
/// instances.
/// </summary>
public abstract class TokenFilterFactory : AbstractAnalysisFactory
@@ -50,9 +50,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Reloads the factory list from the given <seealso cref="ClassLoader"/>.
+ /// Reloads the factory list from the given <see cref="ClassLoader"/>.
/// Changes to the factories are visible after the method ends, all
- /// iterators (<seealso cref="#availableTokenFilters()"/>,...) stay consistent.
+ /// iterators (<see cref="#availableTokenFilters()"/>,...) stay consistent.
///
/// <para><b>NOTE:</b> Only new factories are added, existing ones are
/// never removed or replaced.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
index 127be40..285f090 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Abstract parent class for analysis factories that create <seealso cref="Tokenizer"/>
+ /// Abstract parent class for analysis factories that create <see cref="Tokenizer"/>
/// instances.
/// </summary>
public abstract class TokenizerFactory : AbstractAnalysisFactory
@@ -53,9 +53,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Reloads the factory list from the given <seealso cref="ClassLoader"/>.
+ /// Reloads the factory list from the given <see cref="ClassLoader"/>.
/// Changes to the factories are visible after the method ends, all
- /// iterators (<seealso cref="#availableTokenizers()"/>,...) stay consistent.
+ /// iterators (<see cref="#availableTokenizers()"/>,...) stay consistent.
///
/// <para><b>NOTE:</b> Only new factories are added, existing ones are
/// never removed or replaced.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
index 4005c55..d091f87 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Loader for text files that represent a list of stopwords.
/// </summary>
- /// <seealso cref= IOUtils to obtain <seealso cref="Reader"/> instances
+ /// <seealso cref= IOUtils to obtain <see cref="Reader"/> instances
/// @lucene.internal </seealso>
public class WordlistLoader
{
@@ -49,8 +49,8 @@ namespace Lucene.Net.Analysis.Util
/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
/// </summary>
/// <param name="reader"> TextReader containing the wordlist </param>
- /// <param name="result"> the <seealso cref="CharArraySet"/> to fill with the readers words </param>
- /// <returns> the given <seealso cref="CharArraySet"/> with the reader's words </returns>
+ /// <param name="result"> the <see cref="CharArraySet"/> to fill with the readers words </param>
+ /// <returns> the given <see cref="CharArraySet"/> with the reader's words </returns>
public static CharArraySet GetWordSet(TextReader reader, CharArraySet result)
{
try
@@ -76,8 +76,8 @@ namespace Lucene.Net.Analysis.Util
/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
/// </summary>
/// <param name="reader"> TextReader containing the wordlist </param>
- /// <param name="matchVersion"> the <seealso cref="LuceneVersion"/> </param>
- /// <returns> A <seealso cref="CharArraySet"/> with the reader's words </returns>
+ /// <param name="matchVersion"> the <see cref="LuceneVersion"/> </param>
+ /// <returns> A <see cref="CharArraySet"/> with the reader's words </returns>
public static CharArraySet GetWordSet(TextReader reader, LuceneVersion matchVersion)
{
return GetWordSet(reader, new CharArraySet(matchVersion, INITIAL_CAPACITY, false));
@@ -91,7 +91,7 @@ namespace Lucene.Net.Analysis.Util
/// </summary>
/// <param name="reader"> TextReader containing the wordlist </param>
/// <param name="comment"> The string representing a comment. </param>
- /// <param name="matchVersion"> the <seealso cref="LuceneVersion"/> </param>
+ /// <param name="matchVersion"> the <see cref="LuceneVersion"/> </param>
/// <returns> A CharArraySet with the reader's words </returns>
public static CharArraySet GetWordSet(TextReader reader, string comment, LuceneVersion matchVersion)
{
@@ -106,8 +106,8 @@ namespace Lucene.Net.Analysis.Util
/// </summary>
/// <param name="reader"> TextReader containing the wordlist </param>
/// <param name="comment"> The string representing a comment. </param>
- /// <param name="result"> the <seealso cref="CharArraySet"/> to fill with the readers words </param>
- /// <returns> the given <seealso cref="CharArraySet"/> with the reader's words </returns>
+ /// <param name="result"> the <see cref="CharArraySet"/> to fill with the readers words </param>
+ /// <returns> the given <see cref="CharArraySet"/> with the reader's words </returns>
public static CharArraySet GetWordSet(TextReader reader, string comment, CharArraySet result)
{
try
@@ -141,8 +141,8 @@ namespace Lucene.Net.Analysis.Util
/// </para>
/// </summary>
/// <param name="reader"> TextReader containing a Snowball stopword list </param>
- /// <param name="result"> the <seealso cref="CharArraySet"/> to fill with the readers words </param>
- /// <returns> the given <seealso cref="CharArraySet"/> with the reader's words </returns>
+ /// <param name="result"> the <see cref="CharArraySet"/> to fill with the readers words </param>
+ /// <returns> the given <see cref="CharArraySet"/> with the reader's words </returns>
public static CharArraySet GetSnowballWordSet(TextReader reader, CharArraySet result)
{
try
@@ -184,8 +184,8 @@ namespace Lucene.Net.Analysis.Util
/// </para>
/// </summary>
/// <param name="reader"> TextReader containing a Snowball stopword list </param>
- /// <param name="matchVersion"> the Lucene <seealso cref="Version"/> </param>
- /// <returns> A <seealso cref="CharArraySet"/> with the reader's words </returns>
+ /// <param name="matchVersion"> the Lucene <see cref="LuceneVersion"/> </param>
+ /// <returns> A <see cref="CharArraySet"/> with the reader's words </returns>
public static CharArraySet GetSnowballWordSet(TextReader reader, LuceneVersion matchVersion)
{
return GetSnowballWordSet(reader, new CharArraySet(matchVersion, INITIAL_CAPACITY, false));
@@ -194,7 +194,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Reads a stem dictionary. Each line contains:
- /// <pre>word<b>\t</b>stem</pre>
+ /// <code>word<b>\t</b>stem</code>
/// (i.e. two tab separated words)
/// </summary>
/// <returns> stem dictionary that overrules the stemming algorithm </returns>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
index 8cfc982..f815db4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
@@ -81,7 +81,7 @@ namespace Lucene.Net.Analysis.Wikipedia
/// </summary>
public const int BOTH = 2;
/// <summary>
- /// This flag is used to indicate that the produced "Token" would, if <seealso cref="#TOKENS_ONLY"/> was used, produce multiple tokens.
+ /// This flag is used to indicate that the produced "Token" would, if <see cref="#TOKENS_ONLY"/> was used, produce multiple tokens.
/// </summary>
public const int UNTOKENIZED_TOKEN_FLAG = 1;
/// <summary>
@@ -102,7 +102,7 @@ namespace Lucene.Net.Analysis.Wikipedia
private bool first;
/// <summary>
- /// Creates a new instance of the <seealso cref="WikipediaTokenizer"/>. Attaches the
+ /// Creates a new instance of the <see cref="WikipediaTokenizer"/>. Attaches the
/// <code>input</code> to a newly created JFlex scanner.
/// </summary>
/// <param name="input"> The Input TextReader </param>
@@ -112,11 +112,11 @@ namespace Lucene.Net.Analysis.Wikipedia
}
/// <summary>
- /// Creates a new instance of the <seealso cref="org.apache.lucene.analysis.wikipedia.WikipediaTokenizer"/>. Attaches the
+ /// Creates a new instance of the <see cref="org.apache.lucene.analysis.wikipedia.WikipediaTokenizer"/>. Attaches the
/// <code>input</code> to a the newly created JFlex scanner.
/// </summary>
/// <param name="input"> The input </param>
- /// <param name="tokenOutput"> One of <seealso cref="#TOKENS_ONLY"/>, <seealso cref="#UNTOKENIZED_ONLY"/>, <seealso cref="#BOTH"/> </param>
+ /// <param name="tokenOutput"> One of <see cref="#TOKENS_ONLY"/>, <see cref="#UNTOKENIZED_ONLY"/>, <see cref="#BOTH"/> </param>
public WikipediaTokenizer(TextReader input, int tokenOutput, ICollection<string> untokenizedTypes)
: base(input)
{
@@ -125,11 +125,11 @@ namespace Lucene.Net.Analysis.Wikipedia
}
/// <summary>
- /// Creates a new instance of the <seealso cref="org.apache.lucene.analysis.wikipedia.WikipediaTokenizer"/>. Attaches the
- /// <code>input</code> to a the newly created JFlex scanner. Uses the given <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>.
+ /// Creates a new instance of the <see cref="org.apache.lucene.analysis.wikipedia.WikipediaTokenizer"/>. Attaches the
+ /// <code>input</code> to a the newly created JFlex scanner. Uses the given <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>.
/// </summary>
/// <param name="input"> The input </param>
- /// <param name="tokenOutput"> One of <seealso cref="#TOKENS_ONLY"/>, <seealso cref="#UNTOKENIZED_ONLY"/>, <seealso cref="#BOTH"/> </param>
+ /// <param name="tokenOutput"> One of <see cref="#TOKENS_ONLY"/>, <see cref="#UNTOKENIZED_ONLY"/>, <see cref="#BOTH"/> </param>
public WikipediaTokenizer(AttributeFactory factory, TextReader input, int tokenOutput, ICollection<string> untokenizedTypes)
: base(factory, input)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
index 4a88289..d63e61a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
@@ -27,13 +27,13 @@ namespace Lucene.Net.Analysis.Wikipedia
*/
/// <summary>
- /// Factory for <seealso cref="WikipediaTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="WikipediaTokenizer"/>.
+ /// <code>
/// <fieldType name="text_wiki" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WikipediaTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class WikipediaTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
index d3a7b1f..b057e7d 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
@@ -24,13 +24,13 @@ namespace Lucene.Net.Collation
/// <summary>
/// <para>
- /// Converts each token into its <seealso cref="CollationKey"/>, and then
+ /// Converts each token into its <see cref="CollationKey"/>, and then
/// encodes the bytes as an index term.
/// </para>
/// <para>
/// <strong>WARNING:</strong> Make sure you use exactly the same Collator at
/// index and query time -- CollationKeys are only comparable when produced by
- /// the same Collator. Since <seealso cref="RuleBasedCollator"/>s are not
+ /// the same Collator. Since <see cref="RuleBasedCollator"/>s are not
/// independently versioned, it is unsafe to search against stored
/// CollationKeys unless the following are exactly the same (best practice is
/// to store this information with the index and check that they remain the
@@ -42,10 +42,10 @@ namespace Lucene.Net.Collation
/// <li>
/// The language (and country and variant, if specified) of the Locale
/// used when constructing the collator via
- /// <seealso cref="Collator#getInstance(Locale)"/>.
+ /// <see cref="Collator#getInstance(Locale)"/>.
/// </li>
/// <li>
- /// The collation strength used - see <seealso cref="Collator#setStrength(int)"/>
+ /// The collation strength used - see <see cref="Collator#setStrength(int)"/>
/// </li>
/// </ol>
/// <para>
@@ -76,7 +76,7 @@ namespace Lucene.Net.Collation
/// <summary>
/// Create a CollationAttributeFactory, using
- /// <seealso cref="AttributeSource.AttributeFactory#DEFAULT_ATTRIBUTE_FACTORY"/> as the
+ /// <see cref="AttributeSource.AttributeFactory#DEFAULT_ATTRIBUTE_FACTORY"/> as the
/// factory for all other attributes. </summary>
/// <param name="collator"> CollationKey generator </param>
public CollationAttributeFactory(Collator collator) : this(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, collator)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
index b76e520..4eabd4d 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
@@ -26,18 +26,18 @@ namespace Lucene.Net.Collation
/// <summary>
/// <para>
- /// Configures <seealso cref="KeywordTokenizer"/> with <seealso cref="CollationAttributeFactory"/>.
+ /// Configures <see cref="KeywordTokenizer"/> with <see cref="CollationAttributeFactory"/>.
/// </para>
/// <para>
- /// Converts the token into its <seealso cref="java.text.CollationKey"/>, and then
+ /// Converts the token into its <see cref="java.text.CollationKey"/>, and then
/// encodes the CollationKey either directly or with
- /// <seealso cref="IndexableBinaryStringTools"/> (see <a href="#version">below</a>), to allow
+ /// <see cref="IndexableBinaryStringTools"/> (see <a href="#version">below</a>), to allow
/// it to be stored as an index term.
/// </para>
/// <para>
/// <strong>WARNING:</strong> Make sure you use exactly the same Collator at
/// index and query time -- CollationKeys are only comparable when produced by
- /// the same Collator. Since <seealso cref="java.text.RuleBasedCollator"/>s are not
+ /// the same Collator. Since <see cref="java.text.RuleBasedCollator"/>s are not
/// independently versioned, it is unsafe to search against stored
/// CollationKeys unless the following are exactly the same (best practice is
/// to store this information with the index and check that they remain the
@@ -49,10 +49,10 @@ namespace Lucene.Net.Collation
/// <li>
/// The language (and country and variant, if specified) of the Locale
/// used when constructing the collator via
- /// <seealso cref="Collator#getInstance(java.util.Locale)"/>.
+ /// <see cref="Collator#getInstance(java.util.Locale)"/>.
/// </li>
/// <li>
- /// The collation strength used - see <seealso cref="Collator#setStrength(int)"/>
+ /// The collation strength used - see <see cref="Collator#setStrength(int)"/>
/// </li>
/// </ol>
/// <para>
@@ -73,11 +73,11 @@ namespace Lucene.Net.Collation
/// ICUCollationKeyAnalyzer on the query side, or vice versa.
/// </para>
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating CollationKeyAnalyzer:
/// <ul>
/// <li> As of 4.0, Collation Keys are directly encoded as bytes. Previous
- /// versions will encode the bytes with <seealso cref="IndexableBinaryStringTools"/>.
+ /// versions will encode the bytes with <see cref="IndexableBinaryStringTools"/>.
/// </ul>
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
index 6e684c1..477e524 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
@@ -25,14 +25,14 @@ namespace Lucene.Net.Collation
/// <summary>
/// <para>
- /// Converts each token into its <seealso cref="java.text.CollationKey"/>, and then
- /// encodes the CollationKey with <seealso cref="IndexableBinaryStringTools"/>, to allow
+ /// Converts each token into its <see cref="java.text.CollationKey"/>, and then
+ /// encodes the CollationKey with <see cref="IndexableBinaryStringTools"/>, to allow
/// it to be stored as an index term.
/// </para>
/// <para>
/// <strong>WARNING:</strong> Make sure you use exactly the same Collator at
/// index and query time -- CollationKeys are only comparable when produced by
- /// the same Collator. Since <seealso cref="java.text.RuleBasedCollator"/>s are not
+ /// the same Collator. Since <see cref="java.text.RuleBasedCollator"/>s are not
/// independently versioned, it is unsafe to search against stored
/// CollationKeys unless the following are exactly the same (best practice is
/// to store this information with the index and check that they remain the
@@ -44,10 +44,10 @@ namespace Lucene.Net.Collation
/// <li>
/// The language (and country and variant, if specified) of the Locale
/// used when constructing the collator via
- /// <seealso cref="Collator#getInstance(CultureInfo)"/>.
+ /// <see cref="Collator#getInstance(CultureInfo)"/>.
/// </li>
/// <li>
- /// The collation strength used - see <seealso cref="Collator#setStrength(int)"/>
+ /// The collation strength used - see <see cref="Collator#setStrength(int)"/>
/// </li>
/// </ol>
/// <para>
@@ -67,7 +67,7 @@ namespace Lucene.Net.Collation
/// CollationKeyFilter to generate index terms, do not use
/// ICUCollationKeyFilter on the query side, or vice versa.
/// </para> </summary>
- /// @deprecated Use <seealso cref="CollationAttributeFactory"/> instead, which encodes
+ /// @deprecated Use <see cref="CollationAttributeFactory"/> instead, which encodes
/// terms directly as bytes. This filter will be removed in Lucene 5.0
[Obsolete("Use <seealso cref=\"CollationAttributeFactory\"/> instead, which encodes")]
// LUCENENET TODO: A better option would be to contribute to the icu.net library and
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
index 4d5ab2a..d5e53a1 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Collation
*/
/// <summary>
- /// Factory for <seealso cref="CollationKeyFilter"/>.
+ /// Factory for <see cref="CollationKeyFilter"/>.
/// <para>
/// This factory can be created in two ways:
/// <ul>
@@ -62,15 +62,15 @@ namespace Lucene.Net.Collation
/// <tokenizer class="solr.KeywordTokenizerFactory"/>
/// <filter class="solr.CollationKeyFilterFactory" language="ja" country="JP"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
///
/// </para>
/// </summary>
- /// <seealso cref="Collator"></seealso>
- /// <seealso cref="CultureInfo"></seealso>
- /// <seealso cref="RuleBasedCollator">
+ /// <see cref="Collator"></seealso>
+ /// <see cref="CultureInfo"></seealso>
+ /// <see cref="RuleBasedCollator">
/// @since solr 3.1 </seealso>
- /// @deprecated use <seealso cref="CollationKeyAnalyzer"/> instead.
+ /// @deprecated use <see cref="CollationKeyAnalyzer"/> instead.
[Obsolete("use <seealso cref=\"CollationKeyAnalyzer\"/> instead.")]
public class CollationKeyFilterFactory : TokenFilterFactory, IMultiTermAwareComponent, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs b/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs
index a29a5e8..ceebafb 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Collation.TokenAttributes
*/
/// <summary>
- /// Extension of <seealso cref="CharTermAttribute"/> that encodes the term
+ /// Extension of <see cref="CharTermAttribute"/> that encodes the term
/// text as a binary Unicode collation key instead of as UTF-8 bytes.
/// </summary>
// LUCENENET TODO: A better option would be to contribute to the icu.net library and
[07/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - &lt;seealso cref&gt; > &lt;see cref&gt;,
&lt;pre class="prettyprint"&gt; > &lt;code&gt;, Analyzer.TokenStreamComponents
Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
index 0d0a357..ce3cc41 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Fr
/// refer to http://snowball.sourceforge.net/french/stemmer.html<br>
/// (French stemming algorithm) for details
/// </para> </summary>
- /// @deprecated Use <seealso cref="org.tartarus.snowball.ext.FrenchStemmer"/> instead,
+ /// @deprecated Use <see cref="org.tartarus.snowball.ext.FrenchStemmer"/> instead,
/// which has the same functionality. This filter will be removed in Lucene 4.0
[Obsolete("Use FrenchStemmer instead, which has the same functionality.")]
public class FrenchStemmer
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
index 5dfd573..e1e7e6e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Ga
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Irish.
+ /// <see cref="Analyzer"/> for Irish.
/// </summary>
public sealed class IrishAnalyzer : StopwordAnalyzerBase
{
@@ -92,7 +92,7 @@ namespace Lucene.Net.Analysis.Ga
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public IrishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -111,7 +111,7 @@ namespace Lucene.Net.Analysis.Ga
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -125,15 +125,15 @@ namespace Lucene.Net.Analysis.Ga
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="IrishLowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="IrishLowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs
index d6ae608..c292cd5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Ga
*/
/// <summary>
- /// Factory for <seealso cref="IrishLowerCaseFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="IrishLowerCaseFilter"/>.
+ /// <code>
/// <fieldType name="text_ga" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.IrishLowerCaseFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class IrishLowerCaseFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
index c996ef4..9b471bc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Gl
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Galician.
+ /// <see cref="Analyzer"/> for Galician.
/// </summary>
public sealed class GalicianAnalyzer : StopwordAnalyzerBase
{
@@ -77,7 +77,7 @@ namespace Lucene.Net.Analysis.Gl
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public GalicianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -96,7 +96,7 @@ namespace Lucene.Net.Analysis.Gl
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -110,15 +110,15 @@ namespace Lucene.Net.Analysis.Gl
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="GalicianStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="GalicianStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs
index e1952ad..2b67926 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Gl
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GalicianMinimalStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="GalicianMinimalStemmer"/> to stem
/// Galician words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class GalicianMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs
index 3954829..3fb2221 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Gl
*/
/// <summary>
- /// Factory for <seealso cref="GalicianMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GalicianMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_glplural" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GalicianMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GalicianMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs
index ed11084..3db897e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Gl
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GalicianStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="GalicianStemmer"/> to stem
/// Galician words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class GalicianStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs
index 36fe800..2c47784 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Gl
*/
/// <summary>
- /// Factory for <seealso cref="GalicianStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GalicianStemFilter"/>.
+ /// <code>
/// <fieldType name="text_glstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GalicianStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GalicianStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
index 3888b73..49a1d01 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Analysis.Hi
/// <para>
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating HindiAnalyzer:
/// <ul>
/// <li> As of 3.6, StandardTokenizer is used for tokenization
@@ -110,7 +110,7 @@ namespace Lucene.Net.Analysis.Hi
/// <summary>
/// Builds an analyzer with the default stop words:
- /// <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public HindiAnalyzer(LuceneVersion version)
: this(version, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -119,14 +119,14 @@ namespace Lucene.Net.Analysis.Hi
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="LowerCaseFilter"/>, <seealso cref="IndicNormalizationFilter"/>,
- /// <seealso cref="HindiNormalizationFilter"/>, <seealso cref="SetKeywordMarkerFilter"/>
- /// if a stem exclusion set is provided, <seealso cref="HindiStemFilter"/>, and
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="LowerCaseFilter"/>, <see cref="IndicNormalizationFilter"/>,
+ /// <see cref="HindiNormalizationFilter"/>, <see cref="SetKeywordMarkerFilter"/>
+ /// if a stem exclusion set is provided, <see cref="HindiStemFilter"/>, and
/// Hindi Stop words </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs
index 7502b65..8600e0d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs
@@ -21,13 +21,13 @@ namespace Lucene.Net.Analysis.Hi
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="HindiNormalizer"/> to normalize the
+ /// A <see cref="TokenFilter"/> that applies <see cref="HindiNormalizer"/> to normalize the
/// orthography.
/// <para>
/// In some cases the normalization may cause unrelated terms to conflate, so
/// to prevent terms from being normalized use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para> </summary>
/// <seealso cref= HindiNormalizer </seealso>
public sealed class HindiNormalizationFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs
index 4351770..70f7175 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Hi
*/
/// <summary>
- /// Factory for <seealso cref="HindiNormalizationFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="HindiNormalizationFilter"/>.
+ /// <code>
/// <fieldType name="text_hinormal" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.HindiNormalizationFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class HindiNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs
index ff9981f..ac11063 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Hi
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="HindiStemmer"/> to stem Hindi words.
+ /// A <see cref="TokenFilter"/> that applies <see cref="HindiStemmer"/> to stem Hindi words.
/// </summary>
public sealed class HindiStemFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs
index 1a037c1..cdd897e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Hi
*/
/// <summary>
- /// Factory for <seealso cref="HindiStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="HindiStemFilter"/>.
+ /// <code>
/// <fieldType name="text_histem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.HindiStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class HindiStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
index 57169e9..46fc2ec 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Hu
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Hungarian.
+ /// <see cref="Analyzer"/> for Hungarian.
/// </summary>
public sealed class HungarianAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Hu
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public HungarianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Hu
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Hu
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs
index 5dd0e49..43f33a1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.Hu
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="HungarianLightStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="HungarianLightStemmer"/> to stem
/// Hungarian words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class HungarianLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs
index d8f7c54..2664d63 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Hu
*/
/// <summary>
- /// Factory for <seealso cref="HungarianLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="HungarianLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_hulgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.HungarianLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class HungarianLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
index af966d1..8795529 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
@@ -396,7 +396,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// <param name="affixes"> Map where the result of the parsing will be put </param>
/// <param name="header"> Header line of the affix rule </param>
/// <param name="reader"> BufferedReader to read the content of the rule from </param>
- /// <param name="conditionPattern"> <seealso cref="String#format(String, Object...)"/> pattern to be used to generate the condition regex
+ /// <param name="conditionPattern"> <see cref="string.Format(string, object[])"/> pattern to be used to generate the condition regex
/// pattern </param>
/// <param name="seenPatterns"> map from condition -> index of patterns, for deduplication. </param>
/// <exception cref="IOException"> Can be thrown while reading the rule </exception>
@@ -675,7 +675,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// <summary>
- /// Determines the appropriate <seealso cref="FlagParsingStrategy"/> based on the FLAG definition line taken from the affix file
+ /// Determines the appropriate <see cref="FlagParsingStrategy"/> based on the FLAG definition line taken from the affix file
/// </summary>
/// <param name="flagLine"> Line containing the flag information </param>
/// <returns> FlagParsingStrategy that handles parsing flags in the way specified in the FLAG definition </returns>
@@ -1036,7 +1036,7 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Simple implementation of <seealso cref="FlagParsingStrategy"/> that treats the chars in each String as a individual flags.
+ /// Simple implementation of <see cref="FlagParsingStrategy"/> that treats the chars in each string as individual flags.
/// Can be used with both the ASCII and UTF-8 flag types.
/// </summary>
private class SimpleFlagParsingStrategy : FlagParsingStrategy
@@ -1048,7 +1048,7 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Implementation of <seealso cref="FlagParsingStrategy"/> that assumes each flag is encoded in its numerical form. In the case
+ /// Implementation of <see cref="FlagParsingStrategy"/> that assumes each flag is encoded in its numerical form. In the case
/// of multiple flags, each number is separated by a comma.
/// </summary>
private class NumFlagParsingStrategy : FlagParsingStrategy
@@ -1080,7 +1080,7 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Implementation of <seealso cref="FlagParsingStrategy"/> that assumes each flag is encoded as two ASCII characters whose codes
+ /// Implementation of <see cref="FlagParsingStrategy"/> that assumes each flag is encoded as two ASCII characters whose codes
/// must be combined into a single character.
///
/// TODO (rmuir) test
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
index 1d9c70f..da38ef8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
@@ -26,13 +26,13 @@ namespace Lucene.Net.Analysis.Hunspell
/// stems, this filter can emit multiple tokens for each consumed token
///
/// <para>
- /// Note: This filter is aware of the <seealso cref="KeywordAttribute"/>. To prevent
+ /// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
/// certain terms from being passed to the stemmer
- /// <seealso cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
- /// in a previous <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordAttribute.IsKeyword"/> should be set to <code>true</code>
+ /// in a previous <see cref="TokenStream"/>.
///
/// Note: For including the original term as well as the stemmed version, see
- /// <seealso cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+ /// <see cref="Miscellaneous.KeywordRepeatFilterFactory"/>
/// </para>
///
/// @lucene.experimental
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.Hunspell
private readonly bool longestOnly;
/// <summary>
- /// Create a <seealso cref="HunspellStemFilter"/> outputting all possible stems. </summary>
+ /// Create a <see cref="HunspellStemFilter"/> outputting all possible stems. </summary>
/// <seealso cref= #HunspellStemFilter(TokenStream, Dictionary, boolean) </seealso>
public HunspellStemFilter(TokenStream input, Dictionary dictionary)
: this(input, dictionary, true)
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Create a <seealso cref="HunspellStemFilter"/> outputting all possible stems. </summary>
+ /// Create a <see cref="HunspellStemFilter"/> outputting all possible stems. </summary>
/// <seealso cref= #HunspellStemFilter(TokenStream, Dictionary, boolean, boolean) </seealso>
public HunspellStemFilter(TokenStream input, Dictionary dictionary, bool dedup)
: this(input, dictionary, dedup, false)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
index 4d720f4..4615260 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
@@ -24,14 +24,14 @@ namespace Lucene.Net.Analysis.Hunspell
*/
/// <summary>
- /// TokenFilterFactory that creates instances of <seealso cref="HunspellStemFilter"/>.
+ /// TokenFilterFactory that creates instances of <see cref="HunspellStemFilter"/>.
/// Example config for British English:
- /// <pre class="prettyprint">
+ /// <code>
/// <filter class="solr.HunspellStemFilterFactory"
/// dictionary="en_GB.dic,my_custom.dic"
/// affix="en_GB.aff"
/// ignoreCase="false"
- /// longestOnly="false" /></pre>
+ /// longestOnly="false" /></code>
/// Both parameters dictionary and affix are mandatory.
/// Dictionaries for many languages are available through the OpenOffice project.
///
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
index 911705b..bec70b7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Hy
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Armenian.
+ /// <see cref="Analyzer"/> for Armenian.
/// </summary>
public sealed class ArmenianAnalyzer : StopwordAnalyzerBase
{
@@ -74,7 +74,7 @@ namespace Lucene.Net.Analysis.Hy
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public ArmenianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -93,7 +93,7 @@ namespace Lucene.Net.Analysis.Hy
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -107,15 +107,15 @@ namespace Lucene.Net.Analysis.Hy
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
index c483c64..44d6c11 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
@@ -72,7 +72,7 @@ namespace Lucene.Net.Analysis.Id
private readonly CharArraySet stemExclusionSet;
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public IndonesianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -93,8 +93,8 @@ namespace Lucene.Net.Analysis.Id
/// <summary>
/// Builds an analyzer with the given stop word. If a none-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
- /// <seealso cref="IndonesianStemFilter"/>.
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
+ /// <see cref="IndonesianStemFilter"/>.
/// </summary>
/// <param name="matchVersion">
/// lucene compatibility version </param>
@@ -110,14 +110,14 @@ namespace Lucene.Net.Analysis.Id
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>,
- /// <seealso cref="StopFilter"/>, <seealso cref="SetKeywordMarkerFilter"/>
- /// if a stem exclusion set is provided and <seealso cref="IndonesianStemFilter"/>. </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>,
+ /// <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/>
+ /// if a stem exclusion set is provided and <see cref="IndonesianStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs
index 84e1e61..a2ac74d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Id
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="IndonesianStemmer"/> to stem Indonesian words.
+ /// A <see cref="TokenFilter"/> that applies <see cref="IndonesianStemmer"/> to stem Indonesian words.
/// </summary>
public sealed class IndonesianStemFilter : TokenFilter
{
@@ -31,7 +31,7 @@ namespace Lucene.Net.Analysis.Id
private readonly bool stemDerivational;
/// <summary>
- /// Calls <seealso cref="#IndonesianStemFilter(TokenStream, boolean) IndonesianStemFilter(input, true)"/>
+ /// Calls <see cref="IndonesianStemFilter(TokenStream, bool)"/>, i.e. <c>IndonesianStemFilter(input, true)</c>
/// </summary>
public IndonesianStemFilter(TokenStream input)
: this(input, true)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs
index 0773391..2944496 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Id
*/
/// <summary>
- /// Factory for <seealso cref="IndonesianStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="IndonesianStemFilter"/>.
+ /// <code>
/// <fieldType name="text_idstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.IndonesianStemFilterFactory" stemDerivational="true"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class IndonesianStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs
index 5128b92..412714a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.In
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="IndicNormalizer"/> to normalize text
+ /// A <see cref="TokenFilter"/> that applies <see cref="IndicNormalizer"/> to normalize text
/// in Indian Languages.
/// </summary>
public sealed class IndicNormalizationFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs
index a96d5b7..9026c7c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.In
*/
/// <summary>
- /// Factory for <seealso cref="IndicNormalizationFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="IndicNormalizationFilter"/>.
+ /// <code>
/// <fieldType name="text_innormal" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.IndicNormalizationFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class IndicNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs
index 2de7baa..d492ff6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.In
/// <summary>
/// Simple Tokenizer for text in Indian Languages. </summary>
- /// @deprecated (3.6) Use <seealso cref="StandardTokenizer"/> instead.
+ /// @deprecated (3.6) Use <see cref="StandardTokenizer"/> instead.
[Obsolete("(3.6) Use StandardTokenizer instead.")]
public sealed class IndicTokenizer : CharTokenizer
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
index cf00799..be81d75 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
@@ -30,11 +30,11 @@ namespace Lucene.Net.Analysis.It
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Italian.
+ /// <see cref="Analyzer"/> for Italian.
/// <para>
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating ItalianAnalyzer:
/// <ul>
/// <li> As of 3.6, ItalianLightStemFilter is used for less aggressive stemming.
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.It
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public ItalianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -117,7 +117,7 @@ namespace Lucene.Net.Analysis.It
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -131,15 +131,15 @@ namespace Lucene.Net.Analysis.It
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="ElisionFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="ItalianLightStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="ItalianLightStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs
index b772db3..f86d45d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.It
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="ItalianLightStemmer"/> to stem Italian
+ /// A <see cref="TokenFilter"/> that applies <see cref="ItalianLightStemmer"/> to stem Italian
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class ItalianLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs
index 1caa912..1ea8ee5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.It
*/
/// <summary>
- /// Factory for <seealso cref="ItalianLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ItalianLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_itlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.ItalianLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ItalianLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
index 2ecfd82..26b5074 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Lv
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Latvian.
+ /// <see cref="Analyzer"/> for Latvian.
/// </summary>
public sealed class LatvianAnalyzer : StopwordAnalyzerBase
{
@@ -77,7 +77,7 @@ namespace Lucene.Net.Analysis.Lv
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public LatvianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -96,7 +96,7 @@ namespace Lucene.Net.Analysis.Lv
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -110,15 +110,15 @@ namespace Lucene.Net.Analysis.Lv
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="LatvianStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="LatvianStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs
index 22ea386..8a373fa 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.Lv
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="LatvianStemmer"/> to stem Latvian
+ /// A <see cref="TokenFilter"/> that applies <see cref="LatvianStemmer"/> to stem Latvian
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class LatvianStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs
index d953f94..33b3789 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Lv
*/
/// <summary>
- /// Factory for <seealso cref="LatvianStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="LatvianStemFilter"/>.
+ /// <code>
/// <fieldType name="text_lvstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.LatvianStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class LatvianStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
index db59e0c..76bb80a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
@@ -67,7 +67,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Create a new <seealso cref="ASCIIFoldingFilter"/>.
+ /// Create a new <see cref="ASCIIFoldingFilter"/>.
/// </summary>
/// <param name="input">
/// TokenStream to filter </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs
index 5155908..74a0d33 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="ASCIIFoldingFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ASCIIFoldingFilter"/>.
+ /// <code>
/// <fieldType name="text_ascii" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ASCIIFoldingFilterFactory" preserveOriginal="false"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ASCIIFoldingFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
index d68f881..236d6da 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="CapitalizationFilter"/>.
+ /// Factory for <see cref="CapitalizationFilter"/>.
/// <p/>
/// The factory takes parameters:<br/>
/// "onlyFirstWord" - should each word be capitalized or all of the words?<br/>
@@ -37,7 +37,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// "maxWordCount" - if the token contains more then maxWordCount words, the capitalization is
/// assumed to be correct.<br/>
///
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="text_cptlztn" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -45,7 +45,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// keep="java solr lucene" keepIgnoreCase="false"
/// okPrefix="McK McD McA"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
///
/// @since solr 1.3
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs
index 2b6f70b..1c12925 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs
@@ -37,11 +37,11 @@ namespace Lucene.Net.Analysis.Miscellaneous
private readonly ICharTermAttribute termAtt;
/// <summary>
- /// Create a new <seealso cref="CodepointCountFilter"/>. This will filter out tokens whose
- /// <seealso cref="CharTermAttribute"/> is either too short (<seealso cref="Character#CodePointCount(char[], int, int)"/>
- /// < min) or too long (<seealso cref="Character#codePointCount(char[], int, int)"/> > max). </summary>
+ /// Create a new <see cref="CodepointCountFilter"/>. This will filter out tokens whose
+ /// <see cref="CharTermAttribute"/> is either too short (<see cref="Character#CodePointCount(char[], int, int)"/>
+ /// < min) or too long (<see cref="Character#codePointCount(char[], int, int)"/> > max). </summary>
/// <param name="version"> the Lucene match version </param>
- /// <param name="in"> the <seealso cref="TokenStream"/> to consume </param>
+ /// <param name="in"> the <see cref="TokenStream"/> to consume </param>
/// <param name="min"> the minimum length </param>
/// <param name="max"> the maximum length </param>
public CodepointCountFilter(LuceneVersion version, TokenStream @in, int min, int max)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs
index 23c678f..4163aec 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="CodepointCountFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="CodepointCountFilter"/>.
+ /// <code>
/// <fieldType name="text_lngth" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.CodepointCountFilterFactory" min="0" max="1" />
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class CodepointCountFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs
index 3b8f7d9..c5da204 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// In order to increase search efficiency, this filter puts hyphenated words broken into two lines back together.
/// This filter should be used on indexing time only.
/// Example field definition in schema.xml:
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldtype name="text" class="solr.TextField" positionIncrementGap="100">
/// <analyzer type="index">
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -47,7 +47,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
/// </analyzer>
/// </fieldtype>
- /// </pre>
+ /// </code>
///
/// </summary>
public sealed class HyphenatedWordsFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs
index 526885c..6c4d375 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="HyphenatedWordsFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="HyphenatedWordsFilter"/>.
+ /// <code>
/// <fieldType name="text_hyphn" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.HyphenatedWordsFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class HyphenatedWordsFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
index 82ec1bc..0ff278c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
@@ -43,13 +43,13 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Create a new <seealso cref="KeepWordFilter"/>.
+ /// Create a new <see cref="KeepWordFilter"/>.
/// <para><b>NOTE</b>: The words set passed to this constructor will be directly
/// used by this filter and should not be modified.
/// </para>
/// </summary>
/// <param name="version"> the Lucene match version </param>
- /// <param name="in"> the <seealso cref="TokenStream"/> to consume </param>
+ /// <param name="in"> the <see cref="TokenStream"/> to consume </param>
/// <param name="words"> the words to keep </param>
public KeepWordFilter(LuceneVersion version, TokenStream @in, CharArraySet words)
: base(version, @in)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs
index 99c3fc9..39f61bf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="KeepWordFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="KeepWordFilter"/>.
+ /// <code>
/// <fieldType name="text_keepword" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.KeepWordFilterFactory" words="keepwords.txt" ignoreCase="false"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class KeepWordFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs
index 722ce4b..ab8a884 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
* limitations under the License.
*/
/// <summary>
- /// Marks terms as keywords via the <seealso cref="KeywordAttribute"/>.
+ /// Marks terms as keywords via the <see cref="KeywordAttribute"/>.
/// </summary>
/// <seealso cref= KeywordAttribute </seealso>
public abstract class KeywordMarkerFilter : TokenFilter
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
private readonly IKeywordAttribute keywordAttr;
/// <summary>
- /// Creates a new <seealso cref="KeywordMarkerFilter"/> </summary>
+ /// Creates a new <see cref="KeywordMarkerFilter"/> </summary>
/// <param name="in"> the input stream </param>
protected internal KeywordMarkerFilter(TokenStream @in)
: base(@in)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs
index 9705ff6..0070b74 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="KeywordMarkerFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="KeywordMarkerFilter"/>.
+ /// <code>
/// <fieldType name="text_keyword" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.KeywordMarkerFilterFactory" protected="protectedkeyword.txt" pattern="^.+er$" ignoreCase="false"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class KeywordMarkerFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs
index e770a56..4cfe3e8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
/// This TokenFilter emits each incoming token twice, once as keyword and once non-keyword, in other words once with
- /// <seealso cref="KeywordAttribute#setKeyword(boolean)"/> set to <code>true</code> and once set to <code>false</code>.
- /// This is useful if used with a stem filter that respects the <seealso cref="KeywordAttribute"/> to index the stemmed and the
+ /// <see cref="KeywordAttribute#setKeyword(boolean)"/> set to <code>true</code> and once set to <code>false</code>.
+ /// This is useful if used with a stem filter that respects the <see cref="KeywordAttribute"/> to index the stemmed and the
/// un-stemmed version of a term into the same field.
/// </summary>
public sealed class KeywordRepeatFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs
index 842ab95..c34561a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs
@@ -21,11 +21,11 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="KeywordRepeatFilter"/>.
+ /// Factory for <see cref="KeywordRepeatFilter"/>.
///
- /// Since <seealso cref="KeywordRepeatFilter"/> emits two tokens for every input token, and any tokens that aren't transformed
+ /// Since <see cref="KeywordRepeatFilter"/> emits two tokens for every input token, and any tokens that aren't transformed
/// later in the analysis chain will be in the document twice. Therefore, consider adding
- /// <seealso cref="RemoveDuplicatesTokenFilterFactory"/> later in the analysis chain.
+ /// <see cref="RemoveDuplicatesTokenFilterFactory"/> later in the analysis chain.
/// </summary>
public sealed class KeywordRepeatFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs
index e02fd24..ab19c3a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs
@@ -54,11 +54,11 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Create a new <seealso cref="LengthFilter"/>. This will filter out tokens whose
- /// <seealso cref="CharTermAttribute"/> is either too short (<seealso cref="CharTermAttribute#length()"/>
- /// < min) or too long (<seealso cref="CharTermAttribute#length()"/> > max). </summary>
+ /// Create a new <see cref="LengthFilter"/>. This will filter out tokens whose
+ /// <see cref="CharTermAttribute"/> is either too short (<see cref="CharTermAttribute#length()"/>
+ /// < min) or too long (<see cref="CharTermAttribute#length()"/> > max). </summary>
/// <param name="version"> the Lucene match version </param>
- /// <param name="in"> the <seealso cref="TokenStream"/> to consume </param>
+ /// <param name="in"> the <see cref="TokenStream"/> to consume </param>
/// <param name="min"> the minimum length </param>
/// <param name="max"> the maximum length </param>
public LengthFilter(LuceneVersion version, TokenStream @in, int min, int max)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs
index 019e611..f206b4b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="LengthFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="LengthFilter"/>.
+ /// <code>
/// <fieldType name="text_lngth" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.LengthFilterFactory" min="0" max="1" />
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class LengthFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs
index 0e51d49..3bafb19 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs
@@ -19,7 +19,7 @@
/// <summary>
/// This Analyzer limits the number of tokens while indexing. It is
- /// a replacement for the maximum field length setting inside <seealso cref="org.apache.lucene.index.IndexWriter"/>. </summary>
+ /// a replacement for the maximum field length setting inside <see cref="org.apache.lucene.index.IndexWriter"/>. </summary>
/// <seealso cref= LimitTokenCountFilter </seealso>
public sealed class LimitTokenCountAnalyzer : AnalyzerWrapper
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
index 8202f8c..9e4a37c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
@@ -19,7 +19,7 @@
/// <summary>
/// This TokenFilter limits the number of tokens while indexing. It is
- /// a replacement for the maximum field length setting inside <seealso cref="org.apache.lucene.index.IndexWriter"/>.
+ /// a replacement for the maximum field length setting inside <see cref="org.apache.lucene.index.IndexWriter"/>.
/// <para>
/// By default, this filter ignores any tokens in the wrapped {@code TokenStream}
/// once the limit has been reached, which can result in {@code reset()} being
@@ -28,7 +28,7 @@
/// then consuming the full stream. If you are wrapping a {@code TokenStream}
/// which requires that the full stream of tokens be exhausted in order to
/// function properly, use the
- /// <seealso cref="#LimitTokenCountFilter(TokenStream,int,boolean) consumeAllTokens"/>
+ /// <see cref="#LimitTokenCountFilter(TokenStream,int,boolean) consumeAllTokens"/>
/// option.
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs
index 3644202..25b980d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs
@@ -21,17 +21,17 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="LimitTokenCountFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="LimitTokenCountFilter"/>.
+ /// <code>
/// <fieldType name="text_lngthcnt" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10" consumeAllTokens="false" />
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// <para>
/// The {@code consumeAllTokens} property is optional and defaults to {@code false}.
- /// See <seealso cref="LimitTokenCountFilter"/> for an explanation of it's use.
+ /// See <see cref="LimitTokenCountFilter"/> for an explanation of its use.
/// </para>
/// </summary>
public class LimitTokenCountFilterFactory : TokenFilterFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
index d43d23c..008ff97 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// then consuming the full stream. If you are wrapping a {@code TokenStream}
/// which requires that the full stream of tokens be exhausted in order to
/// function properly, use the
- /// <seealso cref="#LimitTokenPositionFilter(TokenStream,int,boolean) consumeAllTokens"/>
+ /// <see cref="#LimitTokenPositionFilter(TokenStream,int,boolean) consumeAllTokens"/>
/// option.
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs
index 22fb345..e2d7692 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs
@@ -21,17 +21,17 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="LimitTokenPositionFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="LimitTokenPositionFilter"/>.
+ /// <code>
/// <fieldType name="text_limit_pos" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.LimitTokenPositionFilterFactory" maxTokenPosition="3" consumeAllTokens="false" />
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// <para>
/// The {@code consumeAllTokens} property is optional and defaults to {@code false}.
- /// See <seealso cref="LimitTokenPositionFilter"/> for an explanation of its use.
+ /// See <see cref="LimitTokenPositionFilter"/> for an explanation of its use.
/// </para>
/// </summary>
public class LimitTokenPositionFilterFactory : TokenFilterFactory
[10/13] lucenenet git commit: Lucene.Net.Analysis.El refactor: member
accessibility and documentation comments
Posted by ni...@apache.org.
Lucene.Net.Analysis.El refactor: member accessibility and documentation comments
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/d2a16d03
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/d2a16d03
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/d2a16d03
Branch: refs/heads/api-work
Commit: d2a16d0312c64768c56e8740663caee18840ce21
Parents: 217f113
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 11:22:44 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 11:22:44 2017 +0700
----------------------------------------------------------------------
.../Analysis/El/GreekAnalyzer.cs | 23 +++++++--------
.../Analysis/El/GreekLowerCaseFilter.cs | 15 +++++-----
.../Analysis/El/GreekLowerCaseFilterFactory.cs | 3 +-
.../Analysis/El/GreekStemFilter.cs | 4 +--
.../Analysis/El/GreekStemFilterFactory.cs | 3 +-
.../Analysis/El/GreekStemmer.cs | 31 ++++++++++----------
6 files changed, 36 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d2a16d03/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
index 56024bd..4ea3845 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
@@ -31,18 +31,15 @@ namespace Lucene.Net.Analysis.El
/// that will not be indexed at all).
/// A default set of stopwords is used unless an alternative list is specified.
/// </para>
- ///
- /// <a name="version"/>
/// <para>You must specify the required <see cref="LuceneVersion"/>
- /// compatibility when creating GreekAnalyzer:
- /// <ul>
- /// <li> As of 3.1, StandardFilter and GreekStemmer are used by default.
- /// <li> As of 2.9, StopFilter preserves position
- /// increments
- /// </ul>
- ///
+ /// compatibility when creating <see cref="GreekAnalyzer"/>:
+ /// <list type="bullet">
+ /// <item> As of 3.1, StandardFilter and GreekStemmer are used by default.</item>
+ /// <item> As of 2.9, StopFilter preserves position
+ /// increments</item>
+ /// </list>
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// <para><c>NOTE</c>: This class uses the same <see cref="LuceneVersion"/>
/// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class GreekAnalyzer : StopwordAnalyzerBase
@@ -85,7 +82,7 @@ namespace Lucene.Net.Analysis.El
/// <summary>
/// Builds an analyzer with the default stop words. </summary>
/// <param name="matchVersion"> Lucene compatibility version,
- /// See <a href="#version">above</a> </param>
+ /// See <see cref="LuceneVersion"/> </param>
public GreekAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_SET)
{
@@ -100,7 +97,7 @@ namespace Lucene.Net.Analysis.El
/// </para>
/// </summary>
/// <param name="matchVersion"> Lucene compatibility version,
- /// See <a href="#version">above</a> </param>
+ /// See <see cref="LuceneVersion"/> </param>
/// <param name="stopwords"> a stopword set </param>
public GreekAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords)
: base(matchVersion, stopwords)
@@ -110,7 +107,7 @@ namespace Lucene.Net.Analysis.El
/// <summary>
/// Creates
/// <see cref="Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <see cref="Reader"/>.
+ /// used to tokenize all the text in the provided <see cref="TextReader"/>.
/// </summary>
/// <returns> <see cref="Analyzer.TokenStreamComponents"/>
/// built from a <see cref="StandardTokenizer"/> filtered with
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d2a16d03/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
index 559e15e..69cb9f7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
@@ -25,12 +25,11 @@ namespace Lucene.Net.Analysis.El
/// <summary>
/// Normalizes token text to lower case, removes some Greek diacritics,
/// and standardizes final sigma to sigma.
- /// <a name="version"/>
/// <para>You must specify the required <see cref="LuceneVersion"/>
- /// compatibility when creating GreekLowerCaseFilter:
- /// <ul>
- /// <li> As of 3.1, supplementary characters are properly lowercased.
- /// </ul>
+ /// compatibility when creating <see cref="GreekLowerCaseFilter"/>:
+ /// <list type="bullet">
+ /// <item> As of 3.1, supplementary characters are properly lowercased.</item>
+ /// </list>
/// </para>
/// </summary>
public sealed class GreekLowerCaseFilter : TokenFilter
@@ -39,11 +38,11 @@ namespace Lucene.Net.Analysis.El
private readonly CharacterUtils charUtils;
/// <summary>
- /// Create a GreekLowerCaseFilter that normalizes Greek token text.
+ /// Create a <see cref="GreekLowerCaseFilter"/> that normalizes Greek token text.
/// </summary>
/// <param name="matchVersion"> Lucene compatibility version,
- /// See <a href="#version">above</a> </param>
- /// <param name="in"> TokenStream to filter </param>
+ /// See <see cref="LuceneVersion"/> </param>
+ /// <param name="in"> <see cref="TokenStream"/> to filter </param>
public GreekLowerCaseFilter(LuceneVersion matchVersion, TokenStream @in)
: base(@in)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d2a16d03/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
index 2e2daee..65fb382 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
@@ -32,9 +32,8 @@ namespace Lucene.Net.Analysis.El
/// </summary>
public class GreekLowerCaseFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
-
/// <summary>
- /// Creates a new GreekLowerCaseFilterFactory </summary>
+ /// Creates a new <see cref="GreekLowerCaseFilterFactory"/> </summary>
public GreekLowerCaseFilterFactory(IDictionary<string, string> args) : base(args)
{
AssureMatchVersion();
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d2a16d03/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
index 39b77f9..94566f1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
@@ -24,13 +24,13 @@ namespace Lucene.Net.Analysis.El
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// <see cref="Miscellaneous.SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
/// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// <para>
/// NOTE: Input is expected to be casefolded for Greek (including folding of final
/// sigma to sigma), and with diacritics removed. This can be achieved by using
- /// either <see cref="GreekLowerCaseFilter"/> or ICUFoldingFilter before GreekStemFilter.
+ /// either <see cref="GreekLowerCaseFilter"/> or ICUFoldingFilter before <see cref="GreekStemFilter"/>.
/// @lucene.experimental
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d2a16d03/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
index c09df42..c916e8f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
@@ -33,9 +33,8 @@ namespace Lucene.Net.Analysis.El
/// </summary>
public class GreekStemFilterFactory : TokenFilterFactory
{
-
/// <summary>
- /// Creates a new GreekStemFilterFactory </summary>
+ /// Creates a new <see cref="GreekStemFilterFactory"/> </summary>
public GreekStemFilterFactory(IDictionary<string, string> args)
: base(args)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d2a16d03/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
index 4e6dda3..1a5e8b3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// A stemmer for Greek words, according to: <i>Development of a Stemmer for the
- /// Greek Language.</i> Georgios Ntais
+ /// A stemmer for Greek words, according to: <c>Development of a Stemmer for the
+ /// Greek Language.</c> Georgios Ntais
/// <para>
/// NOTE: Input is expected to be casefolded for Greek (including folding of final
/// sigma to sigma), and with diacritics removed. This can be achieved with
@@ -33,13 +33,12 @@ namespace Lucene.Net.Analysis.El
/// </summary>
public class GreekStemmer
{
-
/// <summary>
- /// Stems a word contained in a leading portion of a char[] array.
+ /// Stems a word contained in a leading portion of a <see cref="char[]"/> array.
/// The word is passed through a number of rules that modify it's length.
/// </summary>
- /// <param name="s"> A char[] array that contains the word to be stemmed. </param>
- /// <param name="len"> The length of the char[] array. </param>
+ /// <param name="s"> A <see cref="char[]"/> array that contains the word to be stemmed. </param>
+ /// <param name="len"> The length of the <see cref="char[]"/> array. </param>
/// <returns> The new length of the stemmed word. </returns>
public virtual int Stem(char[] s, int len)
{
@@ -77,7 +76,7 @@ namespace Lucene.Net.Analysis.El
len = Rule21(s, len);
}
- return rule22(s, len);
+ return Rule22(s, len);
}
private int Rule0(char[] s, int len)
@@ -974,7 +973,7 @@ namespace Lucene.Net.Analysis.El
return len;
}
- private int rule22(char[] s, int len)
+ private int Rule22(char[] s, int len)
{
if (StemmerUtil.EndsWith(s, len, "\u03b5\u03c3\u03c4\u03b5\u03c1") ||
StemmerUtil.EndsWith(s, len, "\u03b5\u03c3\u03c4\u03b1\u03c4"))
@@ -1022,12 +1021,12 @@ namespace Lucene.Net.Analysis.El
}
/// <summary>
- /// Checks if the word contained in the leading portion of char[] array ,
+ /// Checks if the word contained in the leading portion of <see cref="char[]"/> array ,
/// ends with a Greek vowel.
/// </summary>
- /// <param name="s"> A char[] array that represents a word. </param>
- /// <param name="len"> The length of the char[] array. </param>
- /// <returns> True if the word contained in the leading portion of char[] array ,
+ /// <param name="s"> A <see cref="char[]"/> array that represents a word. </param>
+ /// <param name="len"> The length of the <see cref="char[]"/> array. </param>
+ /// <returns> True if the word contained in the leading portion of <see cref="char[]"/> array ,
/// ends with a vowel , false otherwise. </returns>
private bool EndsWithVowel(char[] s, int len)
{
@@ -1051,12 +1050,12 @@ namespace Lucene.Net.Analysis.El
}
/// <summary>
- /// Checks if the word contained in the leading portion of char[] array ,
+ /// Checks if the word contained in the leading portion of <see cref="char[]"/> array ,
/// ends with a Greek vowel.
/// </summary>
- /// <param name="s"> A char[] array that represents a word. </param>
- /// <param name="len"> The length of the char[] array. </param>
- /// <returns> True if the word contained in the leading portion of char[] array ,
+ /// <param name="s"> A <see cref="char[]"/> array that represents a word. </param>
+ /// <param name="len"> The length of the <see cref="char[]"/> array. </param>
+ /// <returns> True if the word contained in the leading portion of <see cref="char[]"/> array ,
/// ends with a vowel , false otherwise. </returns>
private bool EndsWithVowelNoY(char[] s, int len)
{
[11/13] lucenenet git commit: Lucene.Net.Analysis.Es refactor: member
accessibility and documentation comments
Posted by ni...@apache.org.
Lucene.Net.Analysis.Es refactor: member accessibility and documentation comments
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/3d97e6a2
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/3d97e6a2
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/3d97e6a2
Branch: refs/heads/api-work
Commit: 3d97e6a23826c426ad94420e39a225a510082066
Parents: d2a16d0
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 11:29:26 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 11:29:26 2017 +0700
----------------------------------------------------------------------
.../Analysis/Es/SpanishAnalyzer.cs | 23 ++++++++++----------
.../Analysis/Es/SpanishLightStemFilter.cs | 2 +-
.../Es/SpanishLightStemFilterFactory.cs | 2 +-
.../Analysis/Es/SpanishLightStemmer.cs | 3 +--
4 files changed, 14 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3d97e6a2/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
index edcde59..d6f6707 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
@@ -31,13 +31,12 @@ namespace Lucene.Net.Analysis.Es
/// <summary>
/// <see cref="Analyzer"/> for Spanish.
/// <para>
- /// <a name="version"/>
/// </para>
/// <para>You must specify the required <see cref="LuceneVersion"/>
- /// compatibility when creating SpanishAnalyzer:
- /// <ul>
- /// <li> As of 3.6, SpanishLightStemFilter is used for less aggressive stemming.
- /// </ul>
+ /// compatibility when creating <see cref="SpanishAnalyzer"/>:
+ /// <list type="bullet">
+ /// <item> As of 3.6, <see cref="SpanishLightStemFilter"/> is used for less aggressive stemming.</item>
+ /// </list>
/// </para>
/// </summary>
public sealed class SpanishAnalyzer : StopwordAnalyzerBase
@@ -60,7 +59,7 @@ namespace Lucene.Net.Analysis.Es
}
/// <summary>
- /// Atomically loads the DEFAULT_STOP_SET in a lazy fashion once the outer class
+ /// Atomically loads the <see cref="DEFAULT_STOP_SET"/> in a lazy fashion once the outer class
/// accesses the static final set the first time.;
/// </summary>
private class DefaultSetHolder
@@ -88,7 +87,7 @@ namespace Lucene.Net.Analysis.Es
}
/// <summary>
- /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public SpanishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +97,7 @@ namespace Lucene.Net.Analysis.Es
/// <summary>
/// Builds an analyzer with the given stop words.
/// </summary>
- /// <param name="matchVersion"> lucene compatibility version </param>
+ /// <param name="matchVersion"> <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords"> a stopword set </param>
public SpanishAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords)
: this(matchVersion, stopwords, CharArraySet.EMPTY_SET)
@@ -110,7 +109,7 @@ namespace Lucene.Net.Analysis.Es
/// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
- /// <param name="matchVersion"> lucene compatibility version </param>
+ /// <param name="matchVersion"> <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords"> a stopword set </param>
/// <param name="stemExclusionSet"> a set of terms not to be stemmed </param>
public SpanishAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords, CharArraySet stemExclusionSet)
@@ -122,13 +121,13 @@ namespace Lucene.Net.Analysis.Es
/// <summary>
/// Creates a
/// <see cref="Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <see cref="Reader"/>.
+ /// which tokenizes all the text in the provided <see cref="TextReader"/>.
/// </summary>
/// <returns> A
/// <see cref="Analyzer.TokenStreamComponents"/>
/// built from an <see cref="StandardTokenizer"/> filtered with
- /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
- /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
+ /// <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
/// provided and <see cref="SpanishLightStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3d97e6a2/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
index b7810fc..16f1461 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Es
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// <see cref="Miscellaneous.SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
/// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3d97e6a2/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
index 31ef33b..f1404a6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.Es
{
/// <summary>
- /// Creates a new SpanishLightStemFilterFactory </summary>
+ /// Creates a new <see cref="SpanishLightStemFilterFactory"/> </summary>
public SpanishLightStemFilterFactory(IDictionary<string, string> args) : base(args)
{
if (args.Count > 0)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3d97e6a2/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemmer.cs
index 5560650..3acafc0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemmer.cs
@@ -56,13 +56,12 @@
/// Light Stemmer for Spanish
/// <para>
/// This stemmer implements the algorithm described in:
- /// <i>Report on CLEF-2001 Experiments</i>
+ /// <c>Report on CLEF-2001 Experiments</c>
/// Jacques Savoy
/// </para>
/// </summary>
public class SpanishLightStemmer
{
-
public virtual int Stem(char[] s, int len)
{
if (len < 5)
[02/13] lucenenet git commit: Lucene.Net.Analysis.Common: fixes for
some documentation comments previously missed
Posted by ni...@apache.org.
Lucene.Net.Analysis.Common: fixes for some documentation comments previously missed
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/829f8ee7
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/829f8ee7
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/829f8ee7
Branch: refs/heads/api-work
Commit: 829f8ee75a7767bb8730fcdef1d031c90ff92d5a
Parents: 363ea8e
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 10:32:35 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 10:32:35 2017 +0700
----------------------------------------------------------------------
.../Analysis/Ar/ArabicLetterTokenizer.cs | 6 +++---
src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs | 2 +-
.../Analysis/CharFilter/NormalizeCharMap.cs | 4 ++--
.../Analysis/Compound/HyphenationCompoundWordTokenFilter.cs | 4 ++--
4 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/829f8ee7/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
index 0e4e28c..c698d5c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
@@ -47,8 +47,8 @@ namespace Lucene.Net.Analysis.Ar
{
/// <summary>
/// Construct a new ArabicLetterTokenizer. </summary>
- /// <param name="matchVersion"> Lucene version
- /// to match See <seealso cref="<a href="#version">above</a>"/>
+ /// <param name="matchVersion"> <see cref="LuceneVersion"/>
+ /// to match
/// </param>
/// <param name="in">
/// the input to split up into tokens </param>
@@ -76,7 +76,7 @@ namespace Lucene.Net.Analysis.Ar
/// <summary>
/// Allows for Letter category or NonspacingMark category </summary>
- /// <seealso cref="LetterTokenizer.IsTokenChar(int)"/>
+ /// <see cref="LetterTokenizer.IsTokenChar(int)"/>
protected override bool IsTokenChar(int c)
{
return base.IsTokenChar(c) || Character.GetType(c) == UnicodeCategory.NonSpacingMark;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/829f8ee7/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
index 704f543..a33ebb6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
@@ -125,7 +125,7 @@ namespace Lucene.Net.Analysis.Ca
/// </summary>
/// <returns> A
/// <see cref="Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
+ /// built from an <see cref="StandardTokenizer"/> filtered with
/// <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>, <see cref="LowerCaseFilter"/>,
/// <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
/// provided and <see cref="SnowballFilter"/>. </returns>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/829f8ee7/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
index bcb031a..110790f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
@@ -108,8 +108,8 @@ namespace Lucene.Net.Analysis.CharFilters
}
/// <summary>
- /// Builds the NormalizeCharMap; call this once you
- /// are done calling <seealso cref="#add"/>.
+ /// Builds the <see cref="NormalizeCharMap"/>; call this once you
+ /// are done calling <see cref="Add"/>.
/// </summary>
public virtual NormalizeCharMap Build()
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/829f8ee7/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
index 83a1a46..533b76e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
@@ -53,7 +53,7 @@ namespace Lucene.Net.Analysis.Compound
/// href="CompoundWordTokenFilterBase.html#version"
/// >CompoundWordTokenFilterBase</a> for details. </param>
/// <param name="input">
- /// the <seealso cref="TokenStream"/> to process </param>
+ /// the <see cref="TokenStream"/> to process </param>
/// <param name="hyphenator">
/// the hyphenation pattern tree to use for hyphenation </param>
/// <param name="dictionary">
@@ -74,7 +74,7 @@ namespace Lucene.Net.Analysis.Compound
/// href="CompoundWordTokenFilterBase.html#version"
/// >CompoundWordTokenFilterBase</a> for details. </param>
/// <param name="input">
- /// the <seealso cref="TokenStream"/> to process </param>
+ /// the <see cref="TokenStream"/> to process </param>
/// <param name="hyphenator">
/// the hyphenation pattern tree to use for hyphenation </param>
/// <param name="dictionary">
[13/13] lucenenet git commit: Lucene.Net.Analysis.Fa refactor: member
accessibility and documentation comments
Posted by ni...@apache.org.
Lucene.Net.Analysis.Fa refactor: member accessibility and documentation comments
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/20087487
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/20087487
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/20087487
Branch: refs/heads/api-work
Commit: 20087487f3bd05aa588c3775a7852b5ac2af53c2
Parents: 98edabe
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 11:40:38 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 11:40:38 2017 +0700
----------------------------------------------------------------------
.../Analysis/Fa/PersianAnalyzer.cs | 13 ++++++-------
.../Analysis/Fa/PersianCharFilter.cs | 2 +-
.../Analysis/Fa/PersianCharFilterFactory.cs | 6 +++---
.../Analysis/Fa/PersianNormalizationFilter.cs | 1 -
.../Analysis/Fa/PersianNormalizationFilterFactory.cs | 3 +--
.../Analysis/Fa/PersianNormalizer.cs | 11 +++++------
6 files changed, 16 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/20087487/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
index 97943be..440c48c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
@@ -35,12 +35,11 @@ namespace Lucene.Net.Analysis.Fa
/// </summary>
public sealed class PersianAnalyzer : StopwordAnalyzerBase
{
-
/// <summary>
/// File containing default Persian stopwords.
///
/// Default stopword list is from
- /// http://members.unine.ch/jacques.savoy/clef/index.html The stopword list is
+ /// http://members.unine.ch/jacques.savoy/clef/index.html. The stopword list is
/// BSD-Licensed.
///
/// </summary>
@@ -64,7 +63,7 @@ namespace Lucene.Net.Analysis.Fa
}
/// <summary>
- /// Atomically loads the DEFAULT_STOP_SET in a lazy fashion once the outer class
+ /// Atomically loads the <see cref="DEFAULT_STOP_SET"/> in a lazy fashion once the outer class
/// accesses the static final set the first time.;
/// </summary>
private class DefaultSetHolder
@@ -89,7 +88,7 @@ namespace Lucene.Net.Analysis.Fa
/// <summary>
/// Builds an analyzer with the default stop words:
- /// <see cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public PersianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -100,7 +99,7 @@ namespace Lucene.Net.Analysis.Fa
/// Builds an analyzer with the given stop words
/// </summary>
/// <param name="matchVersion">
- /// lucene compatibility version </param>
+ /// <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords">
/// a stopword set </param>
public PersianAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords)
@@ -111,7 +110,7 @@ namespace Lucene.Net.Analysis.Fa
/// <summary>
/// Creates
/// <see cref="Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <see cref="Reader"/>.
+ /// used to tokenize all the text in the provided <see cref="TextReader"/>.
/// </summary>
/// <returns> <see cref="Analyzer.TokenStreamComponents"/>
/// built from a <see cref="StandardTokenizer"/> filtered with
@@ -144,7 +143,7 @@ namespace Lucene.Net.Analysis.Fa
}
/// <summary>
- /// Wraps the TextReader with <see cref="PersianCharFilter"/>
+ /// Wraps the <see cref="TextReader"/> with <see cref="PersianCharFilter"/>
/// </summary>
protected override TextReader InitReader(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/20087487/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilter.cs
index bea0cde..ef3ac9d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilter.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// CharFilter that replaces instances of Zero-width non-joiner with an
+ /// <see cref="CharFilter"/> that replaces instances of Zero-width non-joiner with an
/// ordinary space.
/// </summary>
public class PersianCharFilter : CharFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/20087487/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
index 260b530..af27d94 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
@@ -33,10 +33,10 @@ namespace Lucene.Net.Analysis.Fa
/// </summary>
public class PersianCharFilterFactory : CharFilterFactory, IMultiTermAwareComponent
{
-
/// <summary>
- /// Creates a new PersianCharFilterFactory </summary>
- public PersianCharFilterFactory(IDictionary<string, string> args) : base(args)
+ /// Creates a new <see cref="PersianCharFilterFactory"/> </summary>
+ public PersianCharFilterFactory(IDictionary<string, string> args)
+ : base(args)
{
if (args.Count > 0)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/20087487/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
index f3338ab..85baa98 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
@@ -22,7 +22,6 @@ namespace Lucene.Net.Analysis.Fa
/// <summary>
/// A <see cref="TokenFilter"/> that applies <see cref="PersianNormalizer"/> to normalize the
/// orthography.
- ///
/// </summary>
public sealed class PersianNormalizationFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/20087487/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
index be205af..550ed0d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
@@ -33,9 +33,8 @@ namespace Lucene.Net.Analysis.Fa
/// </summary>
public class PersianNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
-
/// <summary>
- /// Creates a new PersianNormalizationFilterFactory </summary>
+ /// Creates a new <see cref="PersianNormalizationFilterFactory"/> </summary>
public PersianNormalizationFilterFactory(IDictionary<string, string> args) : base(args)
{
if (args.Count > 0)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/20087487/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
index 28649e6..81a2cb2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
@@ -27,12 +27,11 @@ namespace Lucene.Net.Analysis.Fa
/// </para>
/// <para>
/// Normalization is defined as:
- /// <ul>
- /// <li>Normalization of various heh + hamza forms and heh goal to heh.
- /// <li>Normalization of farsi yeh and yeh barree to arabic yeh
- /// <li>Normalization of persian keheh to arabic kaf
- /// </ul>
- ///
+ /// <list type="bullet">
+ /// <item>Normalization of various heh + hamza forms and heh goal to heh.</item>
+ /// <item>Normalization of farsi yeh and yeh barree to arabic yeh</item>
+ /// <item>Normalization of persian keheh to arabic kaf</item>
+ /// </list>
/// </para>
/// </summary>
public class PersianNormalizer
[12/13] lucenenet git commit: Lucene.Net.Analysis.Eu refactor: member
accessibility and documentation comments
Posted by ni...@apache.org.
Lucene.Net.Analysis.Eu refactor: member accessibility and documentation comments
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/98edabe0
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/98edabe0
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/98edabe0
Branch: refs/heads/api-work
Commit: 98edabe0d3f18db162f7b1c8c63d9c12e177ef2d
Parents: 3d97e6a
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 11:31:34 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 11:31:34 2017 +0700
----------------------------------------------------------------------
.../Analysis/Eu/BasqueAnalyzer.cs | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98edabe0/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
index b6c20dc..029f798 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
@@ -50,7 +50,7 @@ namespace Lucene.Net.Analysis.Eu
}
/// <summary>
- /// Atomically loads the DEFAULT_STOP_SET in a lazy fashion once the outer class
+ /// Atomically loads the <see cref="DEFAULT_STOP_SET"/> in a lazy fashion once the outer class
/// accesses the static final set the first time.;
/// </summary>
private class DefaultSetHolder
@@ -74,7 +74,7 @@ namespace Lucene.Net.Analysis.Eu
}
/// <summary>
- /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public BasqueAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -84,7 +84,7 @@ namespace Lucene.Net.Analysis.Eu
/// <summary>
/// Builds an analyzer with the given stop words.
/// </summary>
- /// <param name="matchVersion"> lucene compatibility version </param>
+ /// <param name="matchVersion"> <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords"> a stopword set </param>
public BasqueAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords)
: this(matchVersion, stopwords, CharArraySet.EMPTY_SET)
@@ -96,7 +96,7 @@ namespace Lucene.Net.Analysis.Eu
/// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
- /// <param name="matchVersion"> lucene compatibility version </param>
+ /// <param name="matchVersion"> <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords"> a stopword set </param>
/// <param name="stemExclusionSet"> a set of terms not to be stemmed </param>
public BasqueAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords, CharArraySet stemExclusionSet)
@@ -113,8 +113,8 @@ namespace Lucene.Net.Analysis.Eu
/// <returns> A
/// <see cref="Analyzer.TokenStreamComponents"/>
/// built from an <see cref="StandardTokenizer"/> filtered with
- /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
- /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
+ /// <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
/// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
[09/13] lucenenet git commit: Lucene.Net.Analysis.De refactor: member
accessibility and documentation comments
Posted by ni...@apache.org.
Lucene.Net.Analysis.De refactor: member accessibility and documentation comments
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/217f113e
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/217f113e
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/217f113e
Branch: refs/heads/api-work
Commit: 217f113e91923f846fdd00f2aba403d0a1c59b02
Parents: 31d8cbd
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 11:08:05 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 11:08:05 2017 +0700
----------------------------------------------------------------------
.../Analysis/De/GermanAnalyzer.cs | 49 ++++++++++++--------
.../Analysis/De/GermanLightStemFilter.cs | 2 +-
.../Analysis/De/GermanLightStemFilterFactory.cs | 3 +-
.../Analysis/De/GermanLightStemmer.cs | 3 +-
.../Analysis/De/GermanMinimalStemFilter.cs | 2 +-
.../De/GermanMinimalStemFilterFactory.cs | 2 +-
.../Analysis/De/GermanMinimalStemmer.cs | 3 +-
.../Analysis/De/GermanNormalizationFilter.cs | 16 +++----
.../De/GermanNormalizationFilterFactory.cs | 3 +-
.../Analysis/De/GermanStemFilter.cs | 4 +-
.../Analysis/De/GermanStemFilterFactory.cs | 6 +--
.../Analysis/De/GermanStemmer.cs | 24 +++++-----
12 files changed, 63 insertions(+), 54 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
index eae217f..7eefe2e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
@@ -42,16 +42,15 @@ namespace Lucene.Net.Analysis.De
/// exclusion list is empty by default.
/// </para>
///
- /// <a name="version"/>
/// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating GermanAnalyzer:
- /// <ul>
- /// <li> As of 3.6, GermanLightStemFilter is used for less aggressive stemming.
- /// <li> As of 3.1, Snowball stemming is done with SnowballFilter, and
- /// Snowball stopwords are used by default.
- /// <li> As of 2.9, StopFilter preserves position
- /// increments
- /// </ul>
+ /// <list>
+ /// <item> As of 3.6, GermanLightStemFilter is used for less aggressive stemming.</item>
+ /// <item> As of 3.1, Snowball stemming is done with SnowballFilter, and
+ /// Snowball stopwords are used by default.</item>
+ /// <item> As of 2.9, StopFilter preserves position
+ /// increments</item>
+ /// </list>
///
/// </para>
/// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
@@ -59,10 +58,22 @@ namespace Lucene.Net.Analysis.De
/// </summary>
public sealed class GermanAnalyzer : StopwordAnalyzerBase
{
-
/// @deprecated in 3.1, remove in Lucene 5.0 (index bw compat)
[Obsolete("in 3.1, remove in Lucene 5.0 (index bw compat)")]
- private static readonly string[] GERMAN_STOP_WORDS = new string[] { "einer", "eine", "eines", "einem", "einen", "der", "die", "das", "dass", "daß", "du", "er", "sie", "es", "was", "wer", "wie", "wir", "und", "oder", "ohne", "mit", "am", "im", "in", "aus", "auf", "ist", "sein", "war", "wird", "ihr", "ihre", "ihres", "als", "für", "von", "mit", "dich", "dir", "mich", "mir", "mein", "sein", "kein", "durch", "wegen", "wird" };
+ private static readonly string[] GERMAN_STOP_WORDS = new string[] {
+ "einer", "eine", "eines", "einem", "einen",
+ "der", "die", "das", "dass", "daß",
+ "du", "er", "sie", "es",
+ "was", "wer", "wie", "wir",
+ "und", "oder", "ohne", "mit",
+ "am", "im", "in", "aus", "auf",
+ "ist", "sein", "war", "wird",
+ "ihr", "ihre", "ihres",
+ "als", "für", "von", "mit",
+ "dich", "dir", "mich", "mir",
+ "mein", "sein", "kein",
+ "durch", "wegen", "wird"
+ };
/// <summary>
/// File containing default German stopwords. </summary>
@@ -105,9 +116,9 @@ namespace Lucene.Net.Analysis.De
}
}
- /// <summary>
- /// Contains the stopwords used with the <see cref="StopFilter"/>.
- /// </summary>
+ ///// <summary>
+ ///// Contains the stopwords used with the <see cref="StopFilter"/>.
+ ///// </summary>
/// <summary>
/// Contains words that should be indexed but not stemmed.
@@ -116,7 +127,7 @@ namespace Lucene.Net.Analysis.De
/// <summary>
/// Builds an analyzer with the default stop words:
- /// <see cref="#getDefaultStopSet()"/>.
+ /// <see cref="DefaultStopSet"/>.
/// </summary>
public GermanAnalyzer(LuceneVersion matchVersion)
#pragma warning disable 612, 618
@@ -130,7 +141,7 @@ namespace Lucene.Net.Analysis.De
/// Builds an analyzer with the given stop words
/// </summary>
/// <param name="matchVersion">
- /// lucene compatibility version </param>
+ /// <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords">
/// a stopword set </param>
public GermanAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords)
@@ -142,7 +153,7 @@ namespace Lucene.Net.Analysis.De
/// Builds an analyzer with the given stop words
/// </summary>
/// <param name="matchVersion">
- /// lucene compatibility version </param>
+ /// <see cref="LuceneVersion"/> lucene compatibility version </param>
/// <param name="stopwords">
/// a stopword set </param>
/// <param name="stemExclusionSet">
@@ -156,12 +167,12 @@ namespace Lucene.Net.Analysis.De
/// <summary>
/// Creates
/// <see cref="Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <see cref="Reader"/>.
+ /// used to tokenize all the text in the provided <see cref="TextReader"/>.
/// </summary>
/// <returns> <see cref="Analyzer.TokenStreamComponents"/>
/// built from a <see cref="StandardTokenizer"/> filtered with
- /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
- /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
+ /// <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
/// provided, <see cref="GermanNormalizationFilter"/> and <see cref="GermanLightStemFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
index a58138c..1bce56b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.De
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// <see cref="Miscellaneous.SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
/// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
index f16956c..207b788 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
@@ -33,9 +33,8 @@ namespace Lucene.Net.Analysis.De
/// </summary>
public class GermanLightStemFilterFactory : TokenFilterFactory
{
-
/// <summary>
- /// Creates a new GermanLightStemFilterFactory </summary>
+ /// Creates a new <see cref="GermanLightStemFilterFactory"/> </summary>
public GermanLightStemFilterFactory(IDictionary<string, string> args)
: base(args)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemmer.cs
index 96ce9ac..c1940d2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemmer.cs
@@ -56,13 +56,12 @@
/// Light Stemmer for German.
/// <para>
/// This stemmer implements the "UniNE" algorithm in:
- /// <i>Light Stemming Approaches for the French, Portuguese, German and Hungarian Languages</i>
+ /// <c>Light Stemming Approaches for the French, Portuguese, German and Hungarian Languages</c>
/// Jacques Savoy
/// </para>
/// </summary>
public class GermanLightStemmer
{
-
public virtual int Stem(char[] s, int len)
{
for (int i = 0; i < len; i++)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
index 84f1f4b..094a6fc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.De
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// <see cref="Miscellaneous.SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
/// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
index 25c6cb1..cdf77e7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.De
{
/// <summary>
- /// Creates a new GermanMinimalStemFilterFactory </summary>
+ /// Creates a new <see cref="GermanMinimalStemFilterFactory"/> </summary>
public GermanMinimalStemFilterFactory(IDictionary<string, string> args) : base(args)
{
if (args.Count > 0)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemmer.cs
index 05b2d9a..6e01384 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemmer.cs
@@ -56,13 +56,12 @@
/// Minimal Stemmer for German.
/// <para>
/// This stemmer implements the following algorithm:
- /// <i>Morphologie et recherche d'information</i>
+ /// <c>Morphologie et recherche d'information</c>
/// Jacques Savoy.
/// </para>
/// </summary>
public class GermanMinimalStemmer
{
-
public virtual int Stem(char[] s, int len)
{
if (len < 5)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
index 025c674..7160e1c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
@@ -23,16 +23,16 @@ namespace Lucene.Net.Analysis.De
/// <summary>
/// Normalizes German characters according to the heuristics
- /// of the <a href="http://snowball.tartarus.org/algorithms/german2/stemmer.html">
- /// German2 snowball algorithm</a>.
+ /// of the <c>http://snowball.tartarus.org/algorithms/german2/stemmer.html
+ /// German2 snowball algorithm</c>.
/// It allows for the fact that ä, ö and ü are sometimes written as ae, oe and ue.
/// <para>
- /// <ul>
- /// <li> 'ß' is replaced by 'ss'
- /// <li> 'ä', 'ö', 'ü' are replaced by 'a', 'o', 'u', respectively.
- /// <li> 'ae' and 'oe' are replaced by 'a', and 'o', respectively.
- /// <li> 'ue' is replaced by 'u', when not following a vowel or q.
- /// </ul>
+ /// <list>
+ /// <item> 'ß' is replaced by 'ss'</item>
+ /// <item> 'ä', 'ö', 'ü' are replaced by 'a', 'o', 'u', respectively.</item>
+ /// <item> 'ae' and 'oe' are replaced by 'a', and 'o', respectively.</item>
+ /// <item> 'ue' is replaced by 'u', when not following a vowel or q.</item>
+ /// </list>
/// </para>
/// <para>
/// This is useful if you want this normalization without using
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
index 85cd62d..5afe9be 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
@@ -33,9 +33,8 @@ namespace Lucene.Net.Analysis.De
/// </summary>
public class GermanNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
-
/// <summary>
- /// Creates a new GermanNormalizationFilterFactory </summary>
+ /// Creates a new <see cref="GermanNormalizationFilterFactory"/> </summary>
public GermanNormalizationFilterFactory(IDictionary<string, string> args) : base(args)
{
if (args.Count > 0)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
index 542c6a7..cd4291c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
@@ -28,10 +28,10 @@ namespace Lucene.Net.Analysis.De
/// </para>
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// <see cref="Miscellaneous.SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
/// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para> </summary>
- /// <seealso cref= SetKeywordMarkerFilter </seealso>
+ /// <seealso cref="Miscellaneous.SetKeywordMarkerFilter"/>
public sealed class GermanStemFilter : TokenFilter
{
/// <summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
index d182b4a..7176d90 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
@@ -33,10 +33,10 @@ namespace Lucene.Net.Analysis.De
/// </summary>
public class GermanStemFilterFactory : TokenFilterFactory
{
-
/// <summary>
- /// Creates a new GermanStemFilterFactory </summary>
- public GermanStemFilterFactory(IDictionary<string, string> args) : base(args)
+ /// Creates a new <see cref="GermanStemFilterFactory"/> </summary>
+ public GermanStemFilterFactory(IDictionary<string, string> args)
+ : base(args)
{
if (args.Count > 0)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/217f113e/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
index 93a759e..99f2455 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.De
/// A stemmer for German words.
/// <para>
/// The algorithm is based on the report
- /// "A Fast and Simple Stemming Algorithm for German Words" by Jörg
+ /// "A Fast and Simple Stemming Algorithm for German Words" by Jörg
/// Caumanns (joerg.caumanns at isst.fhg.de).
/// </para>
/// </summary>
@@ -38,17 +38,17 @@ namespace Lucene.Net.Analysis.De
private StringBuilder sb = new StringBuilder();
/// <summary>
- /// Amount of characters that are removed with <tt>substitute()</tt> while stemming.
+ /// Amount of characters that are removed with <see cref="Substitute"/> while stemming.
/// </summary>
private int substCount = 0;
private static readonly CultureInfo locale = new CultureInfo("de-DE");
/// <summary>
- /// Stemms the given term to an unique <tt>discriminator</tt>.
+ /// Stemms the given term to an unique <c>discriminator</c>.
/// </summary>
/// <param name="term"> The term that should be stemmed. </param>
- /// <returns> Discriminator for <tt>term</tt> </returns>
+ /// <returns> Discriminator for <paramref name="term"/> </returns>
protected internal virtual string Stem(string term)
{
// Use lowercase for medium stemming.
@@ -175,12 +175,14 @@ namespace Lucene.Net.Analysis.De
/// <summary>
/// Do some substitutions for the term to reduce overstemming:
///
- /// - Substitute Umlauts with their corresponding vowel: äöü -> aou,
- /// "ß" is substituted by "ss"
- /// - Substitute a second char of a pair of equal characters with
- /// an asterisk: ?? -> ?*
- /// - Substitute some common character combinations with a token:
- /// sch/ch/ei/ie/ig/st -> $/§/%/&/#/!
+ /// <list type="bullet">
+ /// <item>Substitute Umlauts with their corresponding vowel: äöü -> aou,
+ /// "ß" is substituted by "ss"</item>
+ /// <item>Substitute a second char of a pair of equal characters with
+ /// an asterisk: ?? -> ?*</item>
+ /// <item>Substitute some common character combinations with a token:
+ /// sch/ch/ei/ie/ig/st -> $/§/%/&/#/!</item>
+ /// </list>
/// </summary>
private void Substitute(StringBuilder buffer)
{
@@ -257,7 +259,7 @@ namespace Lucene.Net.Analysis.De
}
/// <summary>
- /// Undoes the changes made by substitute(). That are character pairs and
+ /// Undoes the changes made by <see cref="Substitute"/>. That are character pairs and
/// character combinations. Umlauts will remain as their corresponding vowel,
/// as "ß" remains as "ss".
/// </summary>
[08/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - <pre class="prettyprint"> > <code>,
</pre> > </code>, <seealso cref=" > <see cref=", Analyzer.T
Posted by ni...@apache.org.
Lucene.Net.Analysis.Common: find and replace for document comments - <pre class="prettyprint"> > <code>, </pre> > </code>, <seealso cref=" > <see cref=", org.apache.lucene.analysis.Analyzer.TokenStreamComponents > Analyzer.TokenStreamComponents, <see cref="Version"/> > <see cref="LuceneVersion"/>
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/31d8cbde
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/31d8cbde
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/31d8cbde
Branch: refs/heads/api-work
Commit: 31d8cbde992061bdd70a62349f2c7fcfac7733e5
Parents: 829f8ee
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 10:38:45 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 10:38:45 2017 +0700
----------------------------------------------------------------------
.../Analysis/De/GermanAnalyzer.cs | 26 ++++-----
.../Analysis/De/GermanLightStemFilter.cs | 6 +-
.../Analysis/De/GermanLightStemFilterFactory.cs | 6 +-
.../Analysis/De/GermanMinimalStemFilter.cs | 6 +-
.../De/GermanMinimalStemFilterFactory.cs | 6 +-
.../De/GermanNormalizationFilterFactory.cs | 6 +-
.../Analysis/De/GermanStemFilter.cs | 14 ++---
.../Analysis/De/GermanStemFilterFactory.cs | 6 +-
.../Analysis/El/GreekAnalyzer.cs | 22 ++++----
.../Analysis/El/GreekLowerCaseFilter.cs | 2 +-
.../Analysis/El/GreekLowerCaseFilterFactory.cs | 6 +-
.../Analysis/El/GreekStemFilter.cs | 8 +--
.../Analysis/El/GreekStemFilterFactory.cs | 6 +-
.../Analysis/El/GreekStemmer.cs | 4 +-
.../Analysis/En/EnglishAnalyzer.cs | 22 ++++----
.../Analysis/En/EnglishMinimalStemFilter.cs | 6 +-
.../En/EnglishMinimalStemFilterFactory.cs | 6 +-
.../Analysis/En/EnglishPossessiveFilter.cs | 6 +-
.../En/EnglishPossessiveFilterFactory.cs | 6 +-
.../Analysis/En/KStemFilter.cs | 8 +--
.../Analysis/En/KStemFilterFactory.cs | 6 +-
.../Analysis/En/PorterStemFilter.cs | 12 ++--
.../Analysis/En/PorterStemFilterFactory.cs | 6 +-
.../Analysis/Es/SpanishAnalyzer.cs | 22 ++++----
.../Analysis/Es/SpanishLightStemFilter.cs | 6 +-
.../Es/SpanishLightStemFilterFactory.cs | 6 +-
.../Analysis/Eu/BasqueAnalyzer.cs | 20 +++----
.../Analysis/Fa/PersianAnalyzer.cs | 20 +++----
.../Analysis/Fa/PersianCharFilterFactory.cs | 6 +-
.../Analysis/Fa/PersianNormalizationFilter.cs | 2 +-
.../Fa/PersianNormalizationFilterFactory.cs | 6 +-
.../Analysis/Fi/FinnishAnalyzer.cs | 20 +++----
.../Analysis/Fi/FinnishLightStemFilter.cs | 6 +-
.../Fi/FinnishLightStemFilterFactory.cs | 6 +-
.../Analysis/Fr/FrenchAnalyzer.cs | 26 ++++-----
.../Analysis/Fr/FrenchLightStemFilter.cs | 6 +-
.../Analysis/Fr/FrenchLightStemFilterFactory.cs | 6 +-
.../Analysis/Fr/FrenchMinimalStemFilter.cs | 6 +-
.../Fr/FrenchMinimalStemFilterFactory.cs | 6 +-
.../Analysis/Fr/FrenchStemFilter.cs | 14 ++---
.../Analysis/Fr/FrenchStemmer.cs | 2 +-
.../Analysis/Ga/IrishAnalyzer.cs | 20 +++----
.../Analysis/Ga/IrishLowerCaseFilterFactory.cs | 6 +-
.../Analysis/Gl/GalicianAnalyzer.cs | 20 +++----
.../Analysis/Gl/GalicianMinimalStemFilter.cs | 6 +-
.../Gl/GalicianMinimalStemFilterFactory.cs | 6 +-
.../Analysis/Gl/GalicianStemFilter.cs | 6 +-
.../Analysis/Gl/GalicianStemFilterFactory.cs | 6 +-
.../Analysis/Hi/HindiAnalyzer.cs | 18 +++---
.../Analysis/Hi/HindiNormalizationFilter.cs | 6 +-
.../Hi/HindiNormalizationFilterFactory.cs | 6 +-
.../Analysis/Hi/HindiStemFilter.cs | 2 +-
.../Analysis/Hi/HindiStemFilterFactory.cs | 6 +-
.../Analysis/Hu/HungarianAnalyzer.cs | 20 +++----
.../Analysis/Hu/HungarianLightStemFilter.cs | 6 +-
.../Hu/HungarianLightStemFilterFactory.cs | 6 +-
.../Analysis/Hunspell/Dictionary.cs | 10 ++--
.../Analysis/Hunspell/HunspellStemFilter.cs | 12 ++--
.../Hunspell/HunspellStemFilterFactory.cs | 6 +-
.../Analysis/Hy/ArmenianAnalyzer.cs | 20 +++----
.../Analysis/Id/IndonesianAnalyzer.cs | 20 +++----
.../Analysis/Id/IndonesianStemFilter.cs | 4 +-
.../Analysis/Id/IndonesianStemFilterFactory.cs | 6 +-
.../Analysis/In/IndicNormalizationFilter.cs | 2 +-
.../In/IndicNormalizationFilterFactory.cs | 6 +-
.../Analysis/In/IndicTokenizer.cs | 2 +-
.../Analysis/It/ItalianAnalyzer.cs | 22 ++++----
.../Analysis/It/ItalianLightStemFilter.cs | 6 +-
.../It/ItalianLightStemFilterFactory.cs | 6 +-
.../Analysis/Lv/LatvianAnalyzer.cs | 20 +++----
.../Analysis/Lv/LatvianStemFilter.cs | 6 +-
.../Analysis/Lv/LatvianStemFilterFactory.cs | 6 +-
.../Miscellaneous/ASCIIFoldingFilter.cs | 2 +-
.../Miscellaneous/ASCIIFoldingFilterFactory.cs | 6 +-
.../CapitalizationFilterFactory.cs | 6 +-
.../Miscellaneous/CodepointCountFilter.cs | 8 +--
.../CodepointCountFilterFactory.cs | 6 +-
.../Miscellaneous/HyphenatedWordsFilter.cs | 4 +-
.../HyphenatedWordsFilterFactory.cs | 6 +-
.../Analysis/Miscellaneous/KeepWordFilter.cs | 4 +-
.../Miscellaneous/KeepWordFilterFactory.cs | 6 +-
.../Miscellaneous/KeywordMarkerFilter.cs | 4 +-
.../Miscellaneous/KeywordMarkerFilterFactory.cs | 6 +-
.../Miscellaneous/KeywordRepeatFilter.cs | 4 +-
.../Miscellaneous/KeywordRepeatFilterFactory.cs | 6 +-
.../Analysis/Miscellaneous/LengthFilter.cs | 8 +--
.../Miscellaneous/LengthFilterFactory.cs | 6 +-
.../Miscellaneous/LimitTokenCountAnalyzer.cs | 2 +-
.../Miscellaneous/LimitTokenCountFilter.cs | 4 +-
.../LimitTokenCountFilterFactory.cs | 8 +--
.../Miscellaneous/LimitTokenPositionFilter.cs | 2 +-
.../LimitTokenPositionFilterFactory.cs | 8 +--
.../Lucene47WordDelimiterFilter.cs | 12 ++--
.../Analysis/Miscellaneous/PatternAnalyzer.cs | 22 ++++----
.../Miscellaneous/PatternKeywordMarkerFilter.cs | 8 +--
.../Miscellaneous/PerFieldAnalyzerWrapper.cs | 6 +-
.../PrefixAndSuffixAwareTokenFilter.cs | 2 +-
.../RemoveDuplicatesTokenFilterFactory.cs | 6 +-
.../Miscellaneous/ScandinavianFoldingFilter.cs | 2 +-
.../ScandinavianFoldingFilterFactory.cs | 6 +-
.../ScandinavianNormalizationFilter.cs | 2 +-
.../ScandinavianNormalizationFilterFactory.cs | 6 +-
.../Miscellaneous/SetKeywordMarkerFilter.cs | 6 +-
.../Miscellaneous/SingleTokenTokenStream.cs | 2 +-
.../Miscellaneous/StemmerOverrideFilter.cs | 22 ++++----
.../StemmerOverrideFilterFactory.cs | 6 +-
.../Analysis/Miscellaneous/TrimFilter.cs | 4 +-
.../Analysis/Miscellaneous/TrimFilterFactory.cs | 6 +-
.../Miscellaneous/TruncateTokenFilterFactory.cs | 6 +-
.../Miscellaneous/WordDelimiterFilter.cs | 16 +++---
.../Miscellaneous/WordDelimiterFilterFactory.cs | 6 +-
.../Miscellaneous/WordDelimiterIterator.cs | 2 +-
.../Analysis/Ngram/EdgeNGramFilterFactory.cs | 6 +-
.../Analysis/Ngram/EdgeNGramTokenFilter.cs | 14 ++---
.../Analysis/Ngram/EdgeNGramTokenizer.cs | 12 ++--
.../Analysis/Ngram/EdgeNGramTokenizerFactory.cs | 6 +-
.../Ngram/Lucene43EdgeNGramTokenizer.cs | 28 +++++-----
.../Analysis/Ngram/Lucene43NGramTokenizer.cs | 10 ++--
.../Analysis/Ngram/NGramFilterFactory.cs | 6 +-
.../Analysis/Ngram/NGramTokenFilter.cs | 18 +++---
.../Analysis/Ngram/NGramTokenizer.cs | 14 ++---
.../Analysis/Ngram/NGramTokenizerFactory.cs | 8 +--
.../Analysis/Nl/DutchAnalyzer.cs | 26 ++++-----
.../Analysis/Nl/DutchStemFilter.cs | 14 ++---
.../Analysis/Nl/DutchStemmer.cs | 2 +-
.../Analysis/No/NorwegianAnalyzer.cs | 20 +++----
.../Analysis/No/NorwegianLightStemFilter.cs | 10 ++--
.../No/NorwegianLightStemFilterFactory.cs | 6 +-
.../Analysis/No/NorwegianLightStemmer.cs | 2 +-
.../Analysis/No/NorwegianMinimalStemFilter.cs | 10 ++--
.../No/NorwegianMinimalStemFilterFactory.cs | 6 +-
.../Analysis/No/NorwegianMinimalStemmer.cs | 4 +-
.../Analysis/Path/PathHierarchyTokenizer.cs | 4 +-
.../Path/PathHierarchyTokenizerFactory.cs | 10 ++--
.../Path/ReversePathHierarchyTokenizer.cs | 4 +-
.../Pattern/PatternCaptureGroupFilterFactory.cs | 6 +-
.../Pattern/PatternCaptureGroupTokenFilter.cs | 6 +-
.../Pattern/PatternReplaceCharFilterFactory.cs | 6 +-
.../Analysis/Pattern/PatternReplaceFilter.cs | 2 +-
.../Pattern/PatternReplaceFilterFactory.cs | 6 +-
.../Analysis/Pattern/PatternTokenizer.cs | 4 +-
.../Analysis/Pattern/PatternTokenizerFactory.cs | 6 +-
.../Payloads/DelimitedPayloadTokenFilter.cs | 2 +-
.../DelimitedPayloadTokenFilterFactory.cs | 6 +-
.../Analysis/Payloads/FloatEncoder.cs | 2 +-
.../Analysis/Payloads/IntegerEncoder.cs | 4 +-
.../Payloads/NumericPayloadTokenFilter.cs | 2 +-
.../NumericPayloadTokenFilterFactory.cs | 6 +-
.../Analysis/Payloads/PayloadEncoder.cs | 6 +-
.../Analysis/Payloads/PayloadHelper.cs | 2 +-
.../Payloads/TokenOffsetPayloadTokenFilter.cs | 4 +-
.../TokenOffsetPayloadTokenFilterFactory.cs | 6 +-
.../Payloads/TypeAsPayloadTokenFilter.cs | 4 +-
.../Payloads/TypeAsPayloadTokenFilterFactory.cs | 6 +-
.../Analysis/Position/PositionFilter.cs | 2 +-
.../Analysis/Position/PositionFilterFactory.cs | 6 +-
.../Analysis/Pt/PortugueseAnalyzer.cs | 22 ++++----
.../Analysis/Pt/PortugueseLightStemFilter.cs | 6 +-
.../Pt/PortugueseLightStemFilterFactory.cs | 6 +-
.../Analysis/Pt/PortugueseMinimalStemFilter.cs | 6 +-
.../Pt/PortugueseMinimalStemFilterFactory.cs | 6 +-
.../Analysis/Pt/PortugueseStemFilter.cs | 6 +-
.../Analysis/Pt/PortugueseStemFilterFactory.cs | 6 +-
.../Analysis/Pt/RSLPStemmerBase.cs | 2 +-
.../Analysis/Query/QueryAutoStopWordAnalyzer.cs | 14 ++---
.../Analysis/Reverse/ReverseStringFilter.cs | 10 ++--
.../Reverse/ReverseStringFilterFactory.cs | 6 +-
.../Analysis/Ro/RomanianAnalyzer.cs | 20 +++----
.../Analysis/Ru/RussianAnalyzer.cs | 18 +++---
.../Analysis/Ru/RussianLetterTokenizer.cs | 24 ++++----
.../Ru/RussianLetterTokenizerFactory.cs | 2 +-
.../Analysis/Ru/RussianLightStemFilter.cs | 6 +-
.../Ru/RussianLightStemFilterFactory.cs | 6 +-
.../Analysis/Shingle/ShingleAnalyzerWrapper.cs | 6 +-
.../Analysis/Shingle/ShingleFilter.cs | 28 +++++-----
.../Analysis/Shingle/ShingleFilterFactory.cs | 6 +-
.../Analysis/Sinks/DateRecognizerSinkFilter.cs | 38 ++++++-------
.../Analysis/Sinks/TeeSinkTokenFilter.cs | 22 ++++----
.../Analysis/Snowball/SnowballAnalyzer.cs | 18 +++---
.../Analysis/Snowball/SnowballFilter.cs | 18 +++---
.../Snowball/SnowballPorterFilterFactory.cs | 6 +-
.../Analysis/Standard/ClassicAnalyzer.cs | 8 +--
.../Analysis/Standard/ClassicFilter.cs | 2 +-
.../Analysis/Standard/ClassicFilterFactory.cs | 6 +-
.../Analysis/Standard/ClassicTokenizer.cs | 6 +-
.../Standard/ClassicTokenizerFactory.cs | 6 +-
.../Analysis/Standard/StandardAnalyzer.cs | 8 +--
.../Analysis/Standard/StandardFilter.cs | 2 +-
.../Analysis/Standard/StandardFilterFactory.cs | 6 +-
.../Analysis/Standard/StandardTokenizer.cs | 8 +--
.../Standard/StandardTokenizerFactory.cs | 6 +-
.../Standard/StandardTokenizerInterface.cs | 2 +-
.../Analysis/Standard/UAX29URLEmailAnalyzer.cs | 10 ++--
.../Analysis/Standard/UAX29URLEmailTokenizer.cs | 4 +-
.../Standard/UAX29URLEmailTokenizerFactory.cs | 6 +-
.../Analysis/Sv/SwedishAnalyzer.cs | 20 +++----
.../Analysis/Sv/SwedishLightStemFilter.cs | 6 +-
.../Sv/SwedishLightStemFilterFactory.cs | 6 +-
.../Analysis/Synonym/FSTSynonymFilterFactory.cs | 4 +-
.../Analysis/Synonym/SlowSynonymFilter.cs | 2 +-
.../Synonym/SlowSynonymFilterFactory.cs | 6 +-
.../Analysis/Synonym/SlowSynonymMap.cs | 4 +-
.../Analysis/Synonym/SynonymFilter.cs | 6 +-
.../Analysis/Synonym/SynonymFilterFactory.cs | 12 ++--
.../Analysis/Synonym/SynonymMap.cs | 6 +-
.../Analysis/Th/ThaiAnalyzer.cs | 16 +++---
.../Analysis/Th/ThaiTokenizer.cs | 2 +-
.../Analysis/Th/ThaiTokenizerFactory.cs | 6 +-
.../Analysis/Th/ThaiWordFilter.cs | 6 +-
.../Analysis/Th/ThaiWordFilterFactory.cs | 8 +--
.../Analysis/Tr/ApostropheFilterFactory.cs | 6 +-
.../Analysis/Tr/TurkishAnalyzer.cs | 20 +++----
.../Tr/TurkishLowerCaseFilterFactory.cs | 6 +-
.../Analysis/Util/AbstractAnalysisFactory.cs | 16 +++---
.../Analysis/Util/AnalysisSPILoader.cs | 4 +-
.../Analysis/Util/CharArrayIterator.cs | 6 +-
.../Analysis/Util/CharArrayMap.cs | 2 +-
.../Analysis/Util/CharFilterFactory.cs | 6 +-
.../Analysis/Util/CharTokenizer.cs | 34 ++++++------
.../Analysis/Util/CharacterUtils.cs | 58 ++++++++++----------
.../Analysis/Util/ClasspathResourceLoader.cs | 4 +-
.../Analysis/Util/ElisionFilter.cs | 6 +-
.../Analysis/Util/ElisionFilterFactory.cs | 6 +-
.../Analysis/Util/FilesystemResourceLoader.cs | 10 ++--
.../Analysis/Util/FilteringTokenFilter.cs | 14 ++---
.../Analysis/Util/ResourceLoaderAware.cs | 2 +-
.../Analysis/Util/RollingCharBuffer.cs | 2 +-
.../Analysis/Util/StopwordAnalyzerBase.cs | 8 +--
.../Analysis/Util/TokenFilterFactory.cs | 6 +-
.../Analysis/Util/TokenizerFactory.cs | 6 +-
.../Analysis/Util/WordlistLoader.cs | 26 ++++-----
.../Analysis/Wikipedia/WikipediaTokenizer.cs | 14 ++---
.../Wikipedia/WikipediaTokenizerFactory.cs | 6 +-
.../Collation/CollationAttributeFactory.cs | 10 ++--
.../Collation/CollationKeyAnalyzer.cs | 16 +++---
.../Collation/CollationKeyFilter.cs | 12 ++--
.../Collation/CollationKeyFilterFactory.cs | 12 ++--
.../CollatedTermAttributeImpl.cs | 2 +-
238 files changed, 1044 insertions(+), 1044 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
index 255fa54..eae217f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for German language.
+ /// <see cref="Analyzer"/> for German language.
/// <para>
/// Supports an external list of stopwords (words that
/// will not be indexed at all) and an external list of exclusions (word that will
@@ -43,7 +43,7 @@ namespace Lucene.Net.Analysis.De
/// </para>
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating GermanAnalyzer:
/// <ul>
/// <li> As of 3.6, GermanLightStemFilter is used for less aggressive stemming.
@@ -54,8 +54,8 @@ namespace Lucene.Net.Analysis.De
/// </ul>
///
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>.</para>
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class GermanAnalyzer : StopwordAnalyzerBase
{
@@ -106,7 +106,7 @@ namespace Lucene.Net.Analysis.De
}
/// <summary>
- /// Contains the stopwords used with the <seealso cref="StopFilter"/>.
+ /// Contains the stopwords used with the <see cref="StopFilter"/>.
/// </summary>
/// <summary>
@@ -116,7 +116,7 @@ namespace Lucene.Net.Analysis.De
/// <summary>
/// Builds an analyzer with the default stop words:
- /// <seealso cref="#getDefaultStopSet()"/>.
+ /// <see cref="#getDefaultStopSet()"/>.
/// </summary>
public GermanAnalyzer(LuceneVersion matchVersion)
#pragma warning disable 612, 618
@@ -155,14 +155,14 @@ namespace Lucene.Net.Analysis.De
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided, <seealso cref="GermanNormalizationFilter"/> and <seealso cref="GermanLightStemFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided, <see cref="GermanNormalizationFilter"/> and <see cref="GermanLightStemFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
index 480c2cf..a58138c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GermanLightStemmer"/> to stem German
+ /// A <see cref="TokenFilter"/> that applies <see cref="GermanLightStemmer"/> to stem German
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class GermanLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
index dc08d57..f16956c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// Factory for <seealso cref="GermanLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GermanLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_delgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GermanLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GermanLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
index 424057d..84f1f4b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GermanMinimalStemmer"/> to stem German
+ /// A <see cref="TokenFilter"/> that applies <see cref="GermanMinimalStemmer"/> to stem German
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class GermanMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
index d9a07f1..25c6cb1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// Factory for <seealso cref="GermanMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GermanMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_deminstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GermanMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GermanMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
index d44c274..85cd62d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// Factory for <seealso cref="GermanNormalizationFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GermanNormalizationFilter"/>.
+ /// <code>
/// <fieldType name="text_denorm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GermanNormalizationFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GermanNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
index e4ea2b6..542c6a7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
@@ -20,16 +20,16 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that stems German words.
+ /// A <see cref="TokenFilter"/> that stems German words.
/// <para>
/// It supports a table of words that should
/// not be stemmed at all. The stemmer used can be changed at runtime after the
- /// filter object is created (as long as it is a <seealso cref="GermanStemmer"/>).
+ /// filter object is created (as long as it is a <see cref="GermanStemmer"/>).
/// </para>
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para> </summary>
/// <seealso cref= SetKeywordMarkerFilter </seealso>
public sealed class GermanStemFilter : TokenFilter
@@ -43,8 +43,8 @@ namespace Lucene.Net.Analysis.De
private readonly IKeywordAttribute keywordAttr;
/// <summary>
- /// Creates a <seealso cref="GermanStemFilter"/> instance </summary>
- /// <param name="in"> the source <seealso cref="TokenStream"/> </param>
+ /// Creates a <see cref="GermanStemFilter"/> instance </summary>
+ /// <param name="in"> the source <see cref="TokenStream"/> </param>
public GermanStemFilter(TokenStream @in)
: base(@in)
{
@@ -77,7 +77,7 @@ namespace Lucene.Net.Analysis.De
}
/// <summary>
- /// Set a alternative/custom <seealso cref="GermanStemmer"/> for this filter.
+ /// Set a alternative/custom <see cref="GermanStemmer"/> for this filter.
/// </summary>
public GermanStemmer Stemmer
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
index 5e1ccae..d182b4a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// Factory for <seealso cref="GermanStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GermanStemFilter"/>.
+ /// <code>
/// <fieldType name="text_destem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GermanStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GermanStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
index 0d46eba..56024bd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for the Greek language.
+ /// <see cref="Analyzer"/> for the Greek language.
/// <para>
/// Supports an external list of stopwords (words
/// that will not be indexed at all).
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.El
/// </para>
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating GreekAnalyzer:
/// <ul>
/// <li> As of 3.1, StandardFilter and GreekStemmer are used by default.
@@ -42,8 +42,8 @@ namespace Lucene.Net.Analysis.El
/// </ul>
///
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>.</para>
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class GreekAnalyzer : StopwordAnalyzerBase
{
@@ -95,7 +95,7 @@ namespace Lucene.Net.Analysis.El
/// Builds an analyzer with the given stop words.
/// <para>
/// <b>NOTE:</b> The stopwords set should be pre-processed with the logic of
- /// <seealso cref="GreekLowerCaseFilter"/> for best results.
+ /// <see cref="GreekLowerCaseFilter"/> for best results.
///
/// </para>
/// </summary>
@@ -109,13 +109,13 @@ namespace Lucene.Net.Analysis.El
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="GreekLowerCaseFilter"/>, <seealso cref="StandardFilter"/>,
- /// <seealso cref="StopFilter"/>, and <seealso cref="GreekStemFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="GreekLowerCaseFilter"/>, <see cref="StandardFilter"/>,
+ /// <see cref="StopFilter"/>, and <see cref="GreekStemFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
index b6d1271..559e15e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.El
/// Normalizes token text to lower case, removes some Greek diacritics,
/// and standardizes final sigma to sigma.
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating GreekLowerCaseFilter:
/// <ul>
/// <li> As of 3.1, supplementary characters are properly lowercased.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
index 2dd8d8d..2e2daee 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// Factory for <seealso cref="GreekLowerCaseFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GreekLowerCaseFilter"/>.
+ /// <code>
/// <fieldType name="text_glc" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.GreekLowerCaseFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GreekLowerCaseFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
index 9fb06cc..39b77f9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
@@ -20,17 +20,17 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GreekStemmer"/> to stem Greek
+ /// A <see cref="TokenFilter"/> that applies <see cref="GreekStemmer"/> to stem Greek
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// <para>
/// NOTE: Input is expected to be casefolded for Greek (including folding of final
/// sigma to sigma), and with diacritics removed. This can be achieved by using
- /// either <seealso cref="GreekLowerCaseFilter"/> or ICUFoldingFilter before GreekStemFilter.
+ /// either <see cref="GreekLowerCaseFilter"/> or ICUFoldingFilter before GreekStemFilter.
/// @lucene.experimental
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
index b7015c1..c09df42 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// Factory for <seealso cref="GreekStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GreekStemFilter"/>.
+ /// <code>
/// <fieldType name="text_gstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.GreekLowerCaseFilterFactory"/>
/// <filter class="solr.GreekStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GreekStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
index dbf3289..4e6dda3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.El
/// <para>
/// NOTE: Input is expected to be casefolded for Greek (including folding of final
/// sigma to sigma), and with diacritics removed. This can be achieved with
- /// either <seealso cref="GreekLowerCaseFilter"/> or ICUFoldingFilter.
+ /// either <see cref="GreekLowerCaseFilter"/> or ICUFoldingFilter.
/// @lucene.experimental
/// </para>
/// </summary>
@@ -1001,7 +1001,7 @@ namespace Lucene.Net.Analysis.El
/// </summary>
/// <param name="s"> A char[] array that represents a word. </param>
/// <param name="len"> The length of the char[] array. </param>
- /// <param name="suffix"> A <seealso cref="String"/> object to check if the word given ends with these characters. </param>
+ /// <param name="suffix"> A <see cref="String"/> object to check if the word given ends with these characters. </param>
/// <returns> True if the word ends with the suffix given , false otherwise. </returns>
private bool EndsWith(char[] s, int len, string suffix)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
index f198e0c..629744b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.En
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for English.
+ /// <see cref="Analyzer"/> for English.
/// </summary>
public sealed class EnglishAnalyzer : StopwordAnalyzerBase
{
@@ -53,7 +53,7 @@ namespace Lucene.Net.Analysis.En
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#getDefaultStopSet"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#getDefaultStopSet"/>.
/// </summary>
public EnglishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -72,7 +72,7 @@ namespace Lucene.Net.Analysis.En
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -86,16 +86,16 @@ namespace Lucene.Net.Analysis.En
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="EnglishPossessiveFilter"/>,
- /// <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="PorterStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="EnglishPossessiveFilter"/>,
+ /// <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="PorterStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs
index 752769a..5c95a9c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs
@@ -24,12 +24,12 @@ namespace Lucene.Net.Analysis.En
//using KeywordAttribute = org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="EnglishMinimalStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="EnglishMinimalStemmer"/> to stem
/// English words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class EnglishMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs
index 1812b4c..48fec3c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.En
*/
/// <summary>
- /// Factory for <seealso cref="EnglishMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="EnglishMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_enminstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.EnglishMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class EnglishMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
index 1aeb6bb..c6ca5ab 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.En
/// <summary>
/// TokenFilter that removes possessives (trailing 's) from words.
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating EnglishPossessiveFilter:
/// <ul>
/// <li> As of 3.6, U+2019 RIGHT SINGLE QUOTATION MARK and
@@ -38,8 +38,8 @@ namespace Lucene.Net.Analysis.En
private readonly ICharTermAttribute termAtt;
private LuceneVersion matchVersion;
- /// @deprecated Use <seealso cref="#EnglishPossessiveFilter(Version, TokenStream)"/> instead.
- [Obsolete(@"Use <seealso cref=""#EnglishPossessiveFilter(org.apache.lucene.util.Version, org.apache.lucene.analysis.TokenStream)""/> instead.")]
+ /// @deprecated Use <see cref="#EnglishPossessiveFilter(Version, TokenStream)"/> instead.
+ [Obsolete(@"Use <see cref=""#EnglishPossessiveFilter(org.apache.lucene.util.Version, org.apache.lucene.analysis.TokenStream)""/> instead.")]
public EnglishPossessiveFilter(TokenStream input) : this(LuceneVersion.LUCENE_35, input)
{
}
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs
index db82287..5718f8c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.En
//using TokenFilterFactory = org.apache.lucene.analysis.util.TokenFilterFactory;
/// <summary>
- /// Factory for <seealso cref="EnglishPossessiveFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="EnglishPossessiveFilter"/>.
+ /// <code>
/// <fieldType name="text_enpossessive" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.EnglishPossessiveFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class EnglishPossessiveFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs
index 4e7af70..4de595b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs
@@ -30,13 +30,13 @@ namespace Lucene.Net.Analysis.En
/// All terms must already be lowercased for this filter to work correctly.
///
/// <para>
- /// Note: This filter is aware of the <seealso cref="KeywordAttribute"/>. To prevent
+ /// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
/// certain terms from being passed to the stemmer
- /// <seealso cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
- /// in a previous <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
+ /// in a previous <see cref="TokenStream"/>.
///
/// Note: For including the original term as well as the stemmed version, see
- /// <seealso cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+ /// <see cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
/// </para>
///
///
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs
index 20c71ce..17374f0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs
@@ -24,15 +24,15 @@ namespace Lucene.Net.Analysis.En
//using TokenFilterFactory = org.apache.lucene.analysis.util.TokenFilterFactory;
/// <summary>
- /// Factory for <seealso cref="KStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="KStemFilter"/>.
+ /// <code>
/// <fieldType name="text_kstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.KStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class KStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs
index 822895f..af42187 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.En
/// To use this with LowerCaseTokenizer, for example, you'd write an
/// analyzer like this:
/// <P>
- /// <PRE class="prettyprint">
+ /// <code>
/// class MyAnalyzer extends Analyzer {
/// {@literal @Override}
/// protected virtual TokenStreamComponents CreateComponents(string fieldName, TextReader reader) {
@@ -38,15 +38,15 @@ namespace Lucene.Net.Analysis.En
/// return new TokenStreamComponents(source, new PorterStemFilter(source));
/// }
/// }
- /// </PRE>
+ /// </code>
/// <para>
- /// Note: This filter is aware of the <seealso cref="KeywordAttribute"/>. To prevent
+ /// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
/// certain terms from being passed to the stemmer
- /// <seealso cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
- /// in a previous <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
+ /// in a previous <see cref="TokenStream"/>.
///
/// Note: For including the original term as well as the stemmed version, see
- /// <seealso cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+ /// <see cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
/// </para>
/// </summary>
public sealed class PorterStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs
index 27217e1..2cc4831 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.En
//using TokenFilterFactory = org.apache.lucene.analysis.util.TokenFilterFactory;
/// <summary>
- /// Factory for <seealso cref="PorterStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PorterStemFilter"/>.
+ /// <code>
/// <fieldType name="text_porterstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.PorterStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PorterStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
index 387ae1e..edcde59 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
@@ -29,11 +29,11 @@ namespace Lucene.Net.Analysis.Es
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Spanish.
+ /// <see cref="Analyzer"/> for Spanish.
/// <para>
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating SpanishAnalyzer:
/// <ul>
/// <li> As of 3.6, SpanishLightStemFilter is used for less aggressive stemming.
@@ -88,7 +88,7 @@ namespace Lucene.Net.Analysis.Es
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public SpanishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -107,7 +107,7 @@ namespace Lucene.Net.Analysis.Es
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -121,15 +121,15 @@ namespace Lucene.Net.Analysis.Es
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SpanishLightStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SpanishLightStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
index 8587fbd..b7810fc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Es
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="SpanishLightStemmer"/> to stem Spanish
+ /// A <see cref="TokenFilter"/> that applies <see cref="SpanishLightStemmer"/> to stem Spanish
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class SpanishLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
index 5088e0a..31ef33b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Es
*/
/// <summary>
- /// Factory for <seealso cref="SpanishLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="SpanishLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_eslgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.SpanishLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class SpanishLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
index d575922..b6c20dc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Eu
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Basque.
+ /// <see cref="Analyzer"/> for Basque.
/// </summary>
public sealed class BasqueAnalyzer : StopwordAnalyzerBase
{
@@ -74,7 +74,7 @@ namespace Lucene.Net.Analysis.Eu
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public BasqueAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -93,7 +93,7 @@ namespace Lucene.Net.Analysis.Eu
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -107,15 +107,15 @@ namespace Lucene.Net.Analysis.Eu
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
index 853d202..97943be 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
@@ -26,9 +26,9 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Persian.
+ /// <see cref="Analyzer"/> for Persian.
/// <para>
- /// This Analyzer uses <seealso cref="PersianCharFilter"/> which implies tokenizing around
+ /// This Analyzer uses <see cref="PersianCharFilter"/> which implies tokenizing around
/// zero-width non-joiner in addition to whitespace. Some persian-specific variant forms (such as farsi
/// yeh and keheh) are standardized. "Stemming" is accomplished via stopwords.
/// </para>
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Fa
/// <summary>
/// Builds an analyzer with the default stop words:
- /// <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public PersianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -110,13 +110,13 @@ namespace Lucene.Net.Analysis.Fa
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="LowerCaseFilter"/>, <seealso cref="ArabicNormalizationFilter"/>,
- /// <seealso cref="PersianNormalizationFilter"/> and Persian Stop words </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="LowerCaseFilter"/>, <see cref="ArabicNormalizationFilter"/>,
+ /// <see cref="PersianNormalizationFilter"/> and Persian Stop words </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source;
@@ -144,7 +144,7 @@ namespace Lucene.Net.Analysis.Fa
}
/// <summary>
- /// Wraps the TextReader with <seealso cref="PersianCharFilter"/>
+ /// Wraps the TextReader with <see cref="PersianCharFilter"/>
/// </summary>
protected override TextReader InitReader(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
index 5083bf9..260b530 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// Factory for <seealso cref="PersianCharFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PersianCharFilter"/>.
+ /// <code>
/// <fieldType name="text_fa" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <charFilter class="solr.PersianCharFilterFactory"/>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PersianCharFilterFactory : CharFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
index f1ff394..f3338ab 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="PersianNormalizer"/> to normalize the
+ /// A <see cref="TokenFilter"/> that applies <see cref="PersianNormalizer"/> to normalize the
/// orthography.
///
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
index e18ef09..be205af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// Factory for <seealso cref="PersianNormalizationFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PersianNormalizationFilter"/>.
+ /// <code>
/// <fieldType name="text_fanormal" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <charFilter class="solr.PersianCharFilterFactory"/>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.PersianNormalizationFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PersianNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
index 6b3e850..28af6f5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Fi
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Finnish.
+ /// <see cref="Analyzer"/> for Finnish.
/// </summary>
public sealed class FinnishAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Fi
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public FinnishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Fi
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Fi
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs
index d1fa00a..2082eb0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.Fi
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="FinnishLightStemmer"/> to stem Finnish
+ /// A <see cref="TokenFilter"/> that applies <see cref="FinnishLightStemmer"/> to stem Finnish
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class FinnishLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs
index 411809c..ce982c5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Fi
*/
/// <summary>
- /// Factory for <seealso cref="FinnishLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="FinnishLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_filgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.FinnishLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class FinnishLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
index 1f82309..b9c01d2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for French language.
+ /// <see cref="Analyzer"/> for French language.
/// <para>
/// Supports an external list of stopwords (words that
/// will not be indexed at all) and an external list of exclusions (word that will
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Fr
/// </para>
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating FrenchAnalyzer:
/// <ul>
/// <li> As of 3.6, FrenchLightStemFilter is used for less aggressive stemming.
@@ -51,8 +51,8 @@ namespace Lucene.Net.Analysis.Fr
/// </ul>
///
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>.</para>
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class FrenchAnalyzer : StopwordAnalyzerBase
{
@@ -118,7 +118,7 @@ namespace Lucene.Net.Analysis.Fr
}
/// <summary>
- /// Builds an analyzer with the default stop words (<seealso cref="#getDefaultStopSet"/>).
+ /// Builds an analyzer with the default stop words (<see cref="#getDefaultStopSet"/>).
/// </summary>
public FrenchAnalyzer(LuceneVersion matchVersion)
#pragma warning disable 612, 618
@@ -157,15 +157,15 @@ namespace Lucene.Net.Analysis.Fr
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="ElisionFilter"/>,
- /// <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>,
- /// <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided, and <seealso cref="FrenchLightStemFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>,
+ /// <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
+ /// <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided, and <see cref="FrenchLightStemFilter"/> </returns>
///
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs
index e82433e..cd97757 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="FrenchLightStemmer"/> to stem French
+ /// A <see cref="TokenFilter"/> that applies <see cref="FrenchLightStemmer"/> to stem French
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class FrenchLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs
index 9bc0dd8..548489b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// Factory for <seealso cref="FrenchLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="FrenchLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_frlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Fr
/// <filter class="solr.ElisionFilterFactory"/>
/// <filter class="solr.FrenchLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class FrenchLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs
index f18fe1f..2cdf579 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="FrenchMinimalStemmer"/> to stem French
+ /// A <see cref="TokenFilter"/> that applies <see cref="FrenchMinimalStemmer"/> to stem French
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class FrenchMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs
index d9cc419..ef587d9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs
@@ -20,8 +20,8 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// Factory for <seealso cref="FrenchMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="FrenchMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_frminstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Fr
/// <filter class="solr.ElisionFilterFactory"/>
/// <filter class="solr.FrenchMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class FrenchMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs
index 151e82b..f74b10a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs
@@ -21,19 +21,19 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that stems french words.
+ /// A <see cref="TokenFilter"/> that stems french words.
/// <para>
/// The used stemmer can be changed at runtime after the
- /// filter object is created (as long as it is a <seealso cref="FrenchStemmer"/>).
+ /// filter object is created (as long as it is a <see cref="FrenchStemmer"/>).
/// </para>
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="KeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para> </summary>
/// <seealso cref= KeywordMarkerFilter </seealso>
- /// @deprecated (3.1) Use <seealso cref="SnowballFilter"/> with
- /// <seealso cref="org.tartarus.snowball.ext.FrenchStemmer"/> instead, which has the
+ /// @deprecated (3.1) Use <see cref="SnowballFilter"/> with
+ /// <see cref="org.tartarus.snowball.ext.FrenchStemmer"/> instead, which has the
/// same functionality. This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use SnowballFilter with FrenchStemmer instead, which has the same functionality. This filter will be removed in Lucene 5.0")]
public sealed class FrenchStemFilter : TokenFilter
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Fr
}
}
/// <summary>
- /// Set a alternative/custom <seealso cref="FrenchStemmer"/> for this filter.
+ /// Set a alternative/custom <see cref="FrenchStemmer"/> for this filter.
/// </summary>
public FrenchStemmer Stemmer
{
[06/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - > ,
> , Analyzer.T
Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs
index d46b6c5..15bf32a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Old Broken version of <seealso cref="WordDelimiterFilter"/>
+ /// Old Broken version of <see cref="WordDelimiterFilter"/>
/// </summary>
[Obsolete]
public sealed class Lucene47WordDelimiterFilter : TokenFilter
@@ -170,7 +170,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Creates a new WordDelimiterFilter using <seealso cref="WordDelimiterIterator#DEFAULT_WORD_DELIM_TABLE"/>
+ /// Creates a new WordDelimiterFilter using <see cref="WordDelimiterIterator#DEFAULT_WORD_DELIM_TABLE"/>
/// as its charTypeTable
/// </summary>
/// <param name="in"> TokenStream to be filtered </param>
@@ -460,7 +460,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#ALPHA"/>
+ /// Checks if the given word type includes <see cref="#ALPHA"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains ALPHA, {@code false} otherwise </returns>
@@ -470,7 +470,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#DIGIT"/>
+ /// Checks if the given word type includes <see cref="#DIGIT"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains DIGIT, {@code false} otherwise </returns>
@@ -480,7 +480,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#SUBWORD_DELIM"/>
+ /// Checks if the given word type includes <see cref="#SUBWORD_DELIM"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains SUBWORD_DELIM, {@code false} otherwise </returns>
@@ -490,7 +490,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#UPPER"/>
+ /// Checks if the given word type includes <see cref="#UPPER"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains UPPER, {@code false} otherwise </returns>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
index a3e6a9e..e33b446 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
@@ -28,32 +28,32 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// Efficient Lucene analyzer/tokenizer that preferably operates on a String rather than a
- /// <seealso cref="TextReader"/>, that can flexibly separate text into terms via a regular expression <seealso cref="Pattern"/>
- /// (with behaviour identical to <seealso cref="String#split(String)"/>),
+ /// <see cref="TextReader"/>, that can flexibly separate text into terms via a regular expression <see cref="Pattern"/>
+ /// (with behaviour identical to <see cref="String#split(String)"/>),
/// and that combines the functionality of
- /// <seealso cref="LetterTokenizer"/>,
- /// <seealso cref="LowerCaseTokenizer"/>,
- /// <seealso cref="WhitespaceTokenizer"/>,
- /// <seealso cref="StopFilter"/> into a single efficient
+ /// <see cref="LetterTokenizer"/>,
+ /// <see cref="LowerCaseTokenizer"/>,
+ /// <see cref="WhitespaceTokenizer"/>,
+ /// <see cref="StopFilter"/> into a single efficient
/// multi-purpose class.
/// <para>
/// If you are unsure how exactly a regular expression should look like, consider
/// prototyping by simply trying various expressions on some test texts via
- /// <seealso cref="String#split(String)"/>. Once you are satisfied, give that regex to
+ /// <see cref="String#split(String)"/>. Once you are satisfied, give that regex to
/// PatternAnalyzer. Also see <a target="_blank"
/// href="http://java.sun.com/docs/books/tutorial/extra/regex/">Java Regular Expression Tutorial</a>.
/// </para>
/// <para>
/// This class can be considerably faster than the "normal" Lucene tokenizers.
/// It can also serve as a building block in a compound Lucene
- /// <seealso cref="TokenFilter"/> chain. For example as in this
+ /// <see cref="TokenFilter"/> chain. For example as in this
/// stemming example:
/// <pre>
/// PatternAnalyzer pat = ...
/// TokenStream tokenStream = new SnowballFilter(
/// pat.tokenStream("content", "James is running round in the woods"),
/// "English"));
- /// </pre>
+ /// </code>
/// </para>
/// </summary>
/// @deprecated (4.0) use the pattern-based analysis in the analysis/pattern package instead.
@@ -152,8 +152,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// if non-null, ignores all tokens that are contained in the
/// given stop set (after previously having applied toLowerCase()
/// if applicable). For example, created via
- /// <seealso cref="StopFilter#makeStopSet(Version, String[])"/>and/or
- /// <seealso cref="WordlistLoader"/>as in
+ /// <see cref="StopFilter#makeStopSet(Version, String[])"/>and/or
+ /// <see cref="WordlistLoader"/>as in
/// <code>WordlistLoader.getWordSet(new File("samples/fulltext/stopwords.txt")</code>
/// or <a href="http://www.unine.ch/info/clef/">other stop words
/// lists </a>. </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
index 5180127..200a934 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Marks terms as keywords via the <seealso cref="KeywordAttribute"/>. Each token
+ /// Marks terms as keywords via the <see cref="KeywordAttribute"/>. Each token
/// that matches the provided pattern is marked as a keyword by setting
- /// <seealso cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
+ /// <see cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
/// </summary>
public sealed class PatternKeywordMarkerFilter : KeywordMarkerFilter
{
@@ -32,9 +32,9 @@ namespace Lucene.Net.Analysis.Miscellaneous
private readonly Regex pattern;
/// <summary>
- /// Create a new <seealso cref="PatternKeywordMarkerFilter"/>, that marks the current
+ /// Create a new <see cref="PatternKeywordMarkerFilter"/>, that marks the current
/// token as a keyword if the tokens term buffer matches the provided
- /// <seealso cref="Pattern"/> via the <seealso cref="KeywordAttribute"/>.
+ /// <see cref="Pattern"/> via the <see cref="KeywordAttribute"/>.
/// </summary>
/// <param name="in">
/// TokenStream to filter </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs
index 8a73498..32e9fa0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs
@@ -25,12 +25,12 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// This analyzer is used to facilitate scenarios where different
/// fields Require different analysis techniques. Use the Map
- /// argument in <seealso cref="#PerFieldAnalyzerWrapper(Analyzer, java.util.Map)"/>
+ /// argument in <see cref="#PerFieldAnalyzerWrapper(Analyzer, java.util.Map)"/>
/// to add non-default analyzers for fields.
///
/// <para>Example usage:
///
- /// <pre class="prettyprint">
+ /// <code>
/// {@code
/// Map<String,Analyzer> analyzerPerField = new HashMap<>();
/// analyzerPerField.put("firstname", new KeywordAnalyzer());
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// PerFieldAnalyzerWrapper aWrapper =
/// new PerFieldAnalyzerWrapper(new StandardAnalyzer(version), analyzerPerField);
/// }
- /// </pre>
+ /// </code>
///
/// </para>
/// <para>In this example, StandardAnalyzer will be used for all fields except "firstname"
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
index 826e05b..f968659 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
@@ -18,7 +18,7 @@
*/
/// <summary>
- /// Links two <seealso cref="PrefixAwareTokenFilter"/>.
+ /// Links two <see cref="PrefixAwareTokenFilter"/>.
/// <p/>
/// <b>NOTE:</b> This filter might not behave correctly if used with custom Attributes, i.e. Attributes other than
/// the ones located in org.apache.lucene.analysis.tokenattributes.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs
index 83b3ca7..1554866 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="RemoveDuplicatesTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="RemoveDuplicatesTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_rmdup" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class RemoveDuplicatesTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs
index 833e751..51b115a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// This filter folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o.
/// It also discriminate against use of double vowels aa, ae, ao, oe and oo, leaving just the first one.
/// <p/>
- /// It's is a semantically more destructive solution than <seealso cref="ScandinavianNormalizationFilter"/> but
+ /// It's is a semantically more destructive solution than <see cref="ScandinavianNormalizationFilter"/> but
/// can in addition help with matching raksmorgas as räksmörgås.
/// <p/>
/// blåbærsyltetøj == blåbärsyltetöj == blaabaarsyltetoej == blabarsyltetoj
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs
index ed182a3..c6930b2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="ScandinavianFoldingFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ScandinavianFoldingFilter"/>.
+ /// <code>
/// <fieldType name="text_scandfold" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ScandinavianFoldingFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ScandinavianFoldingFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs
index 502eaef..5ad937b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// This filter normalize use of the interchangeable Scandinavian characters æÆäÄöÖøØ
/// and folded variants (aa, ao, ae, oe and oo) by transforming them to åÅæÆøØ.
/// <p/>
- /// It's a semantically less destructive solution than <seealso cref="ScandinavianFoldingFilter"/>,
+ /// It's a semantically less destructive solution than <see cref="ScandinavianFoldingFilter"/>,
/// most useful when a person with a Norwegian or Danish keyboard queries a Swedish index
/// and vice versa. This filter does <b>not</b> the common Swedish folds of å and ä to a nor ö to o.
/// <p/>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs
index 1068d08..e5a5832 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter"/>.
+ /// <code>
/// <fieldType name="text_scandnorm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ScandinavianNormalizationFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ScandinavianNormalizationFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs
index fdddf81..f4adbfe 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Marks terms as keywords via the <seealso cref="KeywordAttribute"/>. Each token
+ /// Marks terms as keywords via the <see cref="KeywordAttribute"/>. Each token
/// contained in the provided set is marked as a keyword by setting
- /// <seealso cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
+ /// <see cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
/// </summary>
public sealed class SetKeywordMarkerFilter : KeywordMarkerFilter
{
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// Create a new KeywordSetMarkerFilter, that marks the current token as a
/// keyword if the tokens term buffer is contained in the given set via the
- /// <seealso cref="KeywordAttribute"/>.
+ /// <see cref="KeywordAttribute"/>.
/// </summary>
/// <param name="in">
/// TokenStream to filter </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs
index 15d2c5e..f2c00ce 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// A <seealso cref="TokenStream"/> containing a single token.
+ /// A <see cref="TokenStream"/> containing a single token.
/// </summary>
public sealed class SingleTokenTokenStream : TokenStream
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs
index a3b5bea..0e09209 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Provides the ability to override any <seealso cref="KeywordAttribute"/> aware stemmer
+ /// Provides the ability to override any <see cref="KeywordAttribute"/> aware stemmer
/// with custom dictionary-based stemming.
/// </summary>
public sealed class StemmerOverrideFilter : TokenFilter
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// Create a new StemmerOverrideFilter, performing dictionary-based stemming
/// with the provided <code>dictionary</code>.
/// <para>
- /// Any dictionary-stemmed terms will be marked with <seealso cref="KeywordAttribute"/>
+ /// Any dictionary-stemmed terms will be marked with <see cref="KeywordAttribute"/>
/// so that they will not be stemmed with stemmers down the chain.
/// </para>
/// </summary>
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// A read-only 4-byte FST backed map that allows fast case-insensitive key
- /// value lookups for <seealso cref="StemmerOverrideFilter"/>
+ /// value lookups for <see cref="StemmerOverrideFilter"/>
/// </summary>
// TODO maybe we can generalize this and reuse this map somehow?
public sealed class StemmerOverrideMap
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
internal readonly bool ignoreCase;
/// <summary>
- /// Creates a new <seealso cref="StemmerOverrideMap"/> </summary>
+ /// Creates a new <see cref="StemmerOverrideMap"/> </summary>
/// <param name="fst"> the fst to lookup the overrides </param>
/// <param name="ignoreCase"> if the keys case should be ingored </param>
public StemmerOverrideMap(FST<BytesRef> fst, bool ignoreCase)
@@ -108,7 +108,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Returns a <seealso cref="BytesReader"/> to pass to the <seealso cref="#get(char[], int, FST.Arc, FST.BytesReader)"/> method.
+ /// Returns a <see cref="BytesReader"/> to pass to the <see cref="#get(char[], int, FST.Arc, FST.BytesReader)"/> method.
/// </summary>
public FST.BytesReader BytesReader
{
@@ -153,7 +153,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// This builder builds an <seealso cref="FST"/> for the <seealso cref="StemmerOverrideFilter"/>
+ /// This builder builds an <see cref="FST"/> for the <see cref="StemmerOverrideFilter"/>
/// </summary>
public class Builder
{
@@ -164,7 +164,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
internal readonly CharsRef charsSpare = new CharsRef();
/// <summary>
- /// Creates a new <seealso cref="Builder"/> with ignoreCase set to <code>false</code>
+ /// Creates a new <see cref="Builder"/> with ignoreCase set to <code>false</code>
/// </summary>
public Builder()
: this(false)
@@ -172,7 +172,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Creates a new <seealso cref="Builder"/> </summary>
+ /// Creates a new <see cref="Builder"/> </summary>
/// <param name="ignoreCase"> if the input case should be ignored. </param>
public Builder(bool ignoreCase)
{
@@ -212,9 +212,9 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Returns an <seealso cref="StemmerOverrideMap"/> to be used with the <seealso cref="StemmerOverrideFilter"/> </summary>
- /// <returns> an <seealso cref="StemmerOverrideMap"/> to be used with the <seealso cref="StemmerOverrideFilter"/> </returns>
- /// <exception cref="IOException"> if an <seealso cref="IOException"/> occurs; </exception>
+ /// Returns an <see cref="StemmerOverrideMap"/> to be used with the <see cref="StemmerOverrideFilter"/> </summary>
+ /// <returns> an <see cref="StemmerOverrideMap"/> to be used with the <see cref="StemmerOverrideFilter"/> </returns>
+ /// <exception cref="IOException"> if an <see cref="IOException"/> occurs; </exception>
public virtual StemmerOverrideMap Build()
{
ByteSequenceOutputs outputs = ByteSequenceOutputs.Singleton;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs
index 6934c91..e0c9323 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs
@@ -23,14 +23,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="StemmerOverrideFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="StemmerOverrideFilter"/>.
+ /// <code>
/// <fieldType name="text_dicstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.StemmerOverrideFilterFactory" dictionary="dictionary.txt" ignoreCase="false"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class StemmerOverrideFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
index aec9f50..98539c7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
@@ -34,7 +34,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
private readonly IOffsetAttribute offsetAtt;
/// <summary>
- /// Create a new <seealso cref="TrimFilter"/>. </summary>
+ /// Create a new <see cref="TrimFilter"/>. </summary>
/// <param name="version"> the Lucene match version </param>
/// <param name="in"> the stream to consume </param>
/// <param name="updateOffsets"> whether to update offsets </param>
@@ -53,7 +53,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Create a new <seealso cref="TrimFilter"/> on top of <code>in</code>. </summary>
+ /// Create a new <see cref="TrimFilter"/> on top of <code>in</code>. </summary>
public TrimFilter(LuceneVersion version, TokenStream @in)
#pragma warning disable 612, 618
: this(version, @in, false)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
index 1b47ea7..d091842 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="TrimFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="TrimFilter"/>.
+ /// <code>
/// <fieldType name="text_trm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.NGramTokenizerFactory"/>
/// <filter class="solr.TrimFilterFactory" />
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
/// <seealso cref= TrimFilter </seealso>
public class TrimFilterFactory : TokenFilterFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs
index a3577af..2b738ec 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter"/>. The following type is recommended for "<i>diacritics-insensitive search</i>" for Turkish.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter"/>. The following type is recommended for "<i>diacritics-insensitive search</i>" for Turkish.
+ /// <code>
/// <fieldType name="text_tr_ascii_f5" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <filter class="solr.TruncateTokenFilterFactory" prefixLength="5"/>
/// <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class TruncateTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
index 77f643e..3c639d2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
@@ -64,14 +64,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// </ul>
/// </li>
/// </ul>
- /// One use for <seealso cref="WordDelimiterFilter"/> is to help match words with different
+ /// One use for <see cref="WordDelimiterFilter"/> is to help match words with different
/// subword delimiters. For example, if the source text contained "wi-fi" one may
/// want "wifi" "WiFi" "wi-fi" "wi+fi" queries to all match. One way of doing so
/// is to specify combinations="1" in the analyzer used for indexing, and
/// combinations="0" (the default) in the analyzer used for querying. Given that
- /// the current <seealso cref="StandardTokenizer"/> immediately removes many intra-word
+ /// the current <see cref="StandardTokenizer"/> immediately removes many intra-word
/// delimiters, it is recommended that this filter be used after a tokenizer that
- /// does not do this (such as <seealso cref="WhitespaceTokenizer"/>).
+ /// does not do this (such as <see cref="WhitespaceTokenizer"/>).
/// </summary>
public sealed class WordDelimiterFilter : TokenFilter
{
@@ -225,7 +225,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Creates a new WordDelimiterFilter using <seealso cref="WordDelimiterIterator#DEFAULT_WORD_DELIM_TABLE"/>
+ /// Creates a new WordDelimiterFilter using <see cref="WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE"/>
/// as its charTypeTable
/// </summary>
/// <param name="in"> TokenStream to be filtered </param>
@@ -605,7 +605,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#ALPHA"/>
+ /// Checks if the given word type includes <see cref="ALPHA"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains ALPHA, {@code false} otherwise </returns>
@@ -615,7 +615,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#DIGIT"/>
+ /// Checks if the given word type includes <see cref="DIGIT"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains DIGIT, {@code false} otherwise </returns>
@@ -625,7 +625,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#SUBWORD_DELIM"/>
+ /// Checks if the given word type includes <see cref="SUBWORD_DELIM"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains SUBWORD_DELIM, {@code false} otherwise </returns>
@@ -635,7 +635,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#UPPER"/>
+ /// Checks if the given word type includes <see cref="UPPER"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains UPPER, {@code false} otherwise </returns>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
index ce4959c..a0cc42d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
@@ -27,8 +27,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="WordDelimiterFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="WordDelimiterFilter"/>.
+ /// <code>
/// <fieldType name="text_wd" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// generateWordParts="1" generateNumberParts="1" stemEnglishPossessive="1"
/// types="wdfftypes.txt" />
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class WordDelimiterFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
index 3fe61b6..f507cf2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
@@ -124,7 +124,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// Advance to the next subword in the string.
/// </summary>
- /// <returns> index of the next subword, or <seealso cref="#DONE"/> if all subwords have been returned </returns>
+ /// <returns> index of the next subword, or <see cref="DONE"/> if all subwords have been returned </returns>
internal int Next()
{
current = end;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs
index a740241..2efb5fc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Creates new instances of <seealso cref="EdgeNGramTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Creates new instances of <see cref="EdgeNGramTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_edgngrm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.EdgeNGramFilterFactory" minGramSize="1" maxGramSize="1"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class EdgeNGramFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs
index 6224080..01677cf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs
@@ -25,10 +25,10 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Tokenizes the given token into n-grams of given size(s).
/// <para>
- /// This <seealso cref="TokenFilter"/> create n-grams from the beginning edge or ending edge of a input token.
+ /// This <see cref="TokenFilter"/> create n-grams from the beginning edge or ending edge of a input token.
/// </para>
/// <para><a name="version"/>As of Lucene 4.4, this filter does not support
- /// <seealso cref="Side#BACK"/> (you can use <seealso cref="ReverseStringFilter"/> up-front and
+ /// <see cref="Side.BACK"/> (you can use <see cref="ReverseStringFilter"/> up-front and
/// afterward to get the same behavior), handles supplementary characters
/// correctly and does not update offsets anymore.
/// </para>
@@ -89,8 +89,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
- /// <param name="side"> the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="side"> the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -139,8 +139,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
- /// <param name="sideLabel"> the name of the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="sideLabel"> the name of the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -153,7 +153,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public EdgeNGramTokenFilter(LuceneVersion version, TokenStream input, int minGram, int maxGram)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs
index a4fc18f..09ad7f8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs
@@ -23,19 +23,19 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Tokenizes the input from an edge into n-grams of given size(s).
/// <para>
- /// This <seealso cref="Tokenizer"/> create n-grams from the beginning edge or ending edge of a input token.
+ /// This <see cref="Tokenizer"/> create n-grams from the beginning edge or ending edge of a input token.
/// </para>
/// <para><a name="version" /> As of Lucene 4.4, this tokenizer<ul>
/// <li>can handle <code>maxGram</code> larger than 1024 chars, but beware that this will result in increased memory usage
/// <li>doesn't trim the input,
/// <li>sets position increments equal to 1 instead of 1 for the first token and 0 for all other ones
/// <li>doesn't support backward n-grams anymore.
- /// <li>supports <seealso cref="#isTokenChar(int) pre-tokenization"/>,
+ /// <li>supports <see cref="IsTokenChar(int)">pre-tokenization</see>,
/// <li>correctly handles supplementary characters.
/// </ul>
/// </para>
/// <para>Although <b style="color:red">highly</b> discouraged, it is still possible
- /// to use the old behavior through <seealso cref="Lucene43EdgeNGramTokenizer"/>.
+ /// to use the old behavior through <see cref="Lucene43EdgeNGramTokenizer"/>.
/// </para>
/// </summary>
public class EdgeNGramTokenizer : NGramTokenizer
@@ -47,7 +47,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public EdgeNGramTokenizer(LuceneVersion version, TextReader input, int minGram, int maxGram)
@@ -59,8 +59,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="factory"> <see cref="AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public EdgeNGramTokenizer(LuceneVersion version, AttributeSource.AttributeFactory factory, TextReader input, int minGram, int maxGram)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs
index aebf551..5273ae4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs
@@ -24,13 +24,13 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Creates new instances of <seealso cref="EdgeNGramTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Creates new instances of <see cref="EdgeNGramTokenizer"/>.
+ /// <code>
/// <fieldType name="text_edgngrm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.EdgeNGramTokenizerFactory" minGramSize="1" maxGramSize="1"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class EdgeNGramTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs
index d6f29c2..3ed7187 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Old version of <seealso cref="EdgeNGramTokenizer"/> which doesn't handle correctly
+ /// Old version of <see cref="EdgeNGramTokenizer"/> which doesn't handle correctly
/// supplementary characters.
/// </summary>
[Obsolete]
@@ -76,8 +76,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
- /// <param name="side"> the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="side"> the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -91,9 +91,9 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
- /// <param name="side"> the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="factory"> <see cref="AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="side"> the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -107,8 +107,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
- /// <param name="sideLabel"> the name of the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="sideLabel"> the name of the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -121,9 +121,9 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
- /// <param name="sideLabel"> the name of the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="factory"> <see cref="AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="sideLabel"> the name of the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -136,7 +136,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public Lucene43EdgeNGramTokenizer(LuceneVersion version, TextReader input, int minGram, int maxGram)
@@ -148,8 +148,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="factory"> <see cref="AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public Lucene43EdgeNGramTokenizer(LuceneVersion version, AttributeFactory factory, TextReader input, int minGram, int maxGram)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs
index 5d8d410..a0f210a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Old broken version of <seealso cref="NGramTokenizer"/>.
+ /// Old broken version of <see cref="NGramTokenizer"/>.
/// </summary>
[Obsolete]
public sealed class Lucene43NGramTokenizer : Tokenizer
@@ -43,7 +43,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with given min and max n-grams. </summary>
- /// <param name="input"> <seealso cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public Lucene43NGramTokenizer(TextReader input, int minGram, int maxGram)
@@ -54,8 +54,8 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with given min and max n-grams. </summary>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="factory"> <see cref="AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public Lucene43NGramTokenizer(AttributeFactory factory, TextReader input, int minGram, int maxGram)
@@ -66,7 +66,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with default min and max n-grams. </summary>
- /// <param name="input"> <seealso cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
public Lucene43NGramTokenizer(TextReader input)
: this(input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs
index 3de3466..3c9f738 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Factory for <seealso cref="NGramTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NGramTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_ngrm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.NGramFilterFactory" minGramSize="1" maxGramSize="2"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NGramFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs
index 10cd39c..561e575 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs
@@ -25,8 +25,8 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Tokenizes the input into n-grams of the given size(s).
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/> compatibility when
- /// creating a <seealso cref="NGramTokenFilter"/>. As of Lucene 4.4, this token filters:<ul>
+ /// <para>You must specify the required <see cref="LuceneVersion"/> compatibility when
+ /// creating a <see cref="NGramTokenFilter"/>. As of Lucene 4.4, this token filters:<ul>
/// <li>handles supplementary characters correctly,</li>
/// <li>emits all n-grams for the same token at the same position,</li>
/// <li>does not modify offsets,</li>
@@ -35,14 +35,14 @@ namespace Lucene.Net.Analysis.Ngram
/// "c").</li></ul>
/// </para>
/// <para>You can make this filter use the old behavior by providing a version <
- /// <seealso cref="Version#LUCENE_44"/> in the constructor but this is not recommended as
- /// it will lead to broken <seealso cref="TokenStream"/>s that will cause highlighting
+ /// <see cref="LuceneVersion.LUCENE_44"/> in the constructor but this is not recommended as
+ /// it will lead to broken <see cref="TokenStream"/>s that will cause highlighting
/// bugs.
/// </para>
- /// <para>If you were using this <seealso cref="TokenFilter"/> to perform partial highlighting,
+ /// <para>If you were using this <see cref="TokenFilter"/> to perform partial highlighting,
/// this won't work anymore since this filter doesn't update offsets. You should
- /// modify your analysis chain to use <seealso cref="NGramTokenizer"/>, and potentially
- /// override <seealso cref="NGramTokenizer#isTokenChar(int)"/> to perform pre-tokenization.
+ /// modify your analysis chain to use <see cref="NGramTokenizer"/>, and potentially
+ /// override <see cref="NGramTokenizer.IsTokenChar(int)"/> to perform pre-tokenization.
/// </para>
/// </summary>
public sealed class NGramTokenFilter : TokenFilter
@@ -73,7 +73,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates NGramTokenFilter with given min and max n-grams. </summary>
/// <param name="version"> Lucene version to enable correct position increments.
/// See <a href="#version">above</a> for details. </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public NGramTokenFilter(LuceneVersion version, TokenStream input, int minGram, int maxGram)
@@ -157,7 +157,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates NGramTokenFilter with default min and max n-grams. </summary>
/// <param name="version"> Lucene version to enable correct position increments.
/// See <a href="#version">above</a> for details. </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
public NGramTokenFilter(LuceneVersion version, TokenStream input)
: this(version, input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
index bae9f38..acc42c3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Tokenizes the input into n-grams of the given size(s).
- /// <para>On the contrary to <seealso cref="NGramTokenFilter"/>, this class sets offsets so
+ /// <para>On the contrary to <see cref="NGramTokenFilter"/>, this class sets offsets so
/// that characters between startOffset and endOffset in the original stream are
/// the same as the term chars.
/// </para>
@@ -45,7 +45,7 @@ namespace Lucene.Net.Analysis.Ngram
/// than 1024 chars (limit of the previous version),
/// <li>count grams based on unicode code points instead of java chars (and
/// never split in the middle of surrogate pairs),
- /// <li>give the ability to <seealso cref="#isTokenChar(int) pre-tokenize"/> the stream
+ /// <li>give the ability to <see cref="IsTokenChar(int)">pre-tokenize</see> the stream
/// before computing n-grams.</ul>
/// </para>
/// <para>Additionally, this class doesn't trim trailing whitespaces and emits
@@ -54,7 +54,7 @@ namespace Lucene.Net.Analysis.Ngram
/// from supporting large input streams).
/// </para>
/// <para>Although <b style="color:red">highly</b> discouraged, it is still possible
- /// to use the old behavior through <seealso cref="Lucene43NGramTokenizer"/>.
+ /// to use the old behavior through <see cref="Lucene43NGramTokenizer"/>.
/// </para>
/// </summary>
// non-final to allow for overriding isTokenChar, but all other methods should be final
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with given min and max n-grams. </summary>
/// <param name="version"> the lucene compatibility <a href="#version">version</a> </param>
- /// <param name="input"> <seealso cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public NGramTokenizer(LuceneVersion version, TextReader input, int minGram, int maxGram)
@@ -106,8 +106,8 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with given min and max n-grams. </summary>
/// <param name="version"> the lucene compatibility <a href="#version">version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="factory"> <see cref="AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public NGramTokenizer(LuceneVersion version, AttributeFactory factory, TextReader input, int minGram, int maxGram)
@@ -118,7 +118,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with default min and max n-grams. </summary>
/// <param name="version"> the lucene compatibility <a href="#version">version</a> </param>
- /// <param name="input"> <seealso cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
public NGramTokenizer(LuceneVersion version, TextReader input)
: this(version, input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs
index 6aaab8b..73865fb 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Factory for <seealso cref="NGramTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NGramTokenizer"/>.
+ /// <code>
/// <fieldType name="text_ngrm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.NGramTokenizerFactory" minGramSize="1" maxGramSize="2"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NGramTokenizerFactory : TokenizerFactory
{
@@ -50,7 +50,7 @@ namespace Lucene.Net.Analysis.Ngram
}
/// <summary>
- /// Creates the <seealso cref="TokenStream"/> of n-grams from the given <seealso cref="TextReader"/> and <seealso cref="AttributeSource.AttributeFactory"/>. </summary>
+ /// Creates the <see cref="TokenStream"/> of n-grams from the given <see cref="TextReader"/> and <see cref="AttributeSource.AttributeFactory"/>. </summary>
public override Tokenizer Create(AttributeSource.AttributeFactory factory, TextReader input)
{
#pragma warning disable 612, 618
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
index a3430a9..d38d922 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Nl
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Dutch language.
+ /// <see cref="Analyzer"/> for Dutch language.
/// <para>
/// Supports an external list of stopwords (words that
/// will not be indexed at all), an external list of exclusions (word that will
@@ -39,11 +39,11 @@ namespace Lucene.Net.Analysis.Nl
/// </para>
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating DutchAnalyzer:
/// <ul>
- /// <li> As of 3.6, <seealso cref="#DutchAnalyzer(Version, CharArraySet)"/> and
- /// <seealso cref="#DutchAnalyzer(Version, CharArraySet, CharArraySet)"/> also populate
+ /// <li> As of 3.6, <see cref="#DutchAnalyzer(Version, CharArraySet)"/> and
+ /// <see cref="#DutchAnalyzer(Version, CharArraySet, CharArraySet)"/> also populate
/// the default entries for the stem override dictionary
/// <li> As of 3.1, Snowball stemming is done with SnowballFilter,
/// LowerCaseFilter is used prior to StopFilter, and Snowball
@@ -53,8 +53,8 @@ namespace Lucene.Net.Analysis.Nl
/// </ul>
///
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>.</para>
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class DutchAnalyzer : Analyzer
{
@@ -123,7 +123,7 @@ namespace Lucene.Net.Analysis.Nl
private readonly LuceneVersion matchVersion;
/// <summary>
- /// Builds an analyzer with the default stop words (<seealso cref="#getDefaultStopSet()"/>)
+ /// Builds an analyzer with the default stop words (<see cref="#getDefaultStopSet()"/>)
/// and a few default entries for the stem exclusion table.
///
/// </summary>
@@ -192,13 +192,13 @@ namespace Lucene.Net.Analysis.Nl
}
/// <summary>
- /// Returns a (possibly reused) <seealso cref="TokenStream"/> which tokenizes all the
- /// text in the provided <seealso cref="Reader"/>.
+ /// Returns a (possibly reused) <see cref="TokenStream"/> which tokenizes all the
+ /// text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> A <seealso cref="TokenStream"/> built from a <seealso cref="StandardTokenizer"/>
- /// filtered with <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>,
- /// <seealso cref="StopFilter"/>, <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is provided,
- /// <seealso cref="StemmerOverrideFilter"/>, and <seealso cref="SnowballFilter"/> </returns>
+ /// <returns> A <see cref="TokenStream"/> built from a <see cref="StandardTokenizer"/>
+ /// filtered with <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>,
+ /// <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is provided,
+ /// <see cref="StemmerOverrideFilter"/>, and <see cref="SnowballFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader aReader)
{
#pragma warning disable 612, 618
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
index 53eb7ac..ba3d181 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
@@ -23,20 +23,20 @@ namespace Lucene.Net.Analysis.Nl
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that stems Dutch words.
+ /// A <see cref="TokenFilter"/> that stems Dutch words.
/// <para>
/// It supports a table of words that should
/// not be stemmed at all. The stemmer used can be changed at runtime after the
- /// filter object is created (as long as it is a <seealso cref="DutchStemmer"/>).
+ /// filter object is created (as long as it is a <see cref="DutchStemmer"/>).
/// </para>
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="KeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para> </summary>
/// <seealso cref= KeywordMarkerFilter </seealso>
- /// @deprecated (3.1) Use <seealso cref="SnowballFilter"/> with
- /// <seealso cref="org.tartarus.snowball.ext.DutchStemmer"/> instead, which has the
+ /// @deprecated (3.1) Use <see cref="SnowballFilter"/> with
+ /// <see cref="org.tartarus.snowball.ext.DutchStemmer"/> instead, which has the
/// same functionality. This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use SnowballFilter with DutchStemmer instead, which has the same functionality. This filter will be removed in Lucene 5.0")]
public sealed class DutchStemFilter : TokenFilter
@@ -90,7 +90,7 @@ namespace Lucene.Net.Analysis.Nl
}
/// <summary>
- /// Set a alternative/custom <seealso cref="DutchStemmer"/> for this filter.
+ /// Set a alternative/custom <see cref="DutchStemmer"/> for this filter.
/// </summary>
public DutchStemmer Stemmer
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs
index 6b7f003..036b761 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Nl
/// the <a href="http://snowball.tartarus.org/algorithms/dutch/stemmer.html">dutch stemming</a>
/// algorithm in Martin Porter's snowball project.
/// </para> </summary>
- /// @deprecated (3.1) Use <seealso cref="org.tartarus.snowball.ext.DutchStemmer"/> instead,
+ /// @deprecated (3.1) Use <see cref="org.tartarus.snowball.ext.DutchStemmer"/> instead,
/// which has the same functionality. This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use Tartarus.Snowball.Ext.DutchStemmer instead, which has the same functionality. This filter will be removed in Lucene 5.0")]
public class DutchStemmer
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
index d1edb2d..6d3b1dd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Norwegian.
+ /// <see cref="Analyzer"/> for Norwegian.
/// </summary>
public sealed class NorwegianAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.No
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public NorwegianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
index d967988..6d0e0c1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="NorwegianLightStemmer"/> to stem Norwegian
+ /// A <see cref="TokenFilter"/> that applies <see cref="NorwegianLightStemmer"/> to stem Norwegian
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class NorwegianLightStemFilter : TokenFilter
@@ -46,8 +46,8 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new NorwegianLightStemFilter </summary>
- /// <param name="flags"> set to <seealso cref="NorwegianLightStemmer#BOKMAAL"/>,
- /// <seealso cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
+ /// <param name="flags"> set to <see cref="NorwegianLightStemmer#BOKMAAL"/>,
+ /// <see cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
public NorwegianLightStemFilter(TokenStream input, int flags) : base(input)
{
stemmer = new NorwegianLightStemmer(flags);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs
index ec3499f..cc28b03 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// Factory for <seealso cref="NorwegianLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NorwegianLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.NorwegianLightStemFilterFactory" variant="nb"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NorwegianLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs
index ece0410..3a8a66e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs
@@ -77,7 +77,7 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new NorwegianLightStemmer </summary>
- /// <param name="flags"> set to <seealso cref="#BOKMAAL"/>, <seealso cref="#NYNORSK"/>, or both. </param>
+ /// <param name="flags"> set to <see cref="#BOKMAAL"/>, <see cref="#NYNORSK"/>, or both. </param>
public NorwegianLightStemmer(int flags)
{
if (flags <= 0 || flags > BOKMAAL + NYNORSK)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
index 3e4605b..877fb59 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="NorwegianMinimalStemmer"/> to stem Norwegian
+ /// A <see cref="TokenFilter"/> that applies <see cref="NorwegianMinimalStemmer"/> to stem Norwegian
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class NorwegianMinimalStemFilter : TokenFilter
@@ -46,8 +46,8 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new NorwegianLightStemFilter </summary>
- /// <param name="flags"> set to <seealso cref="NorwegianLightStemmer#BOKMAAL"/>,
- /// <seealso cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
+ /// <param name="flags"> set to <see cref="NorwegianLightStemmer#BOKMAAL"/>,
+ /// <see cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
public NorwegianMinimalStemFilter(TokenStream input, int flags)
: base(input)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs
index 04e9f32..ee99e3d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// Factory for <seealso cref="NorwegianMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NorwegianMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.NorwegianMinimalStemFilterFactory" variant="nb"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NorwegianMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs
index 09afa6c..5724ef2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs
@@ -68,8 +68,8 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new NorwegianMinimalStemmer </summary>
- /// <param name="flags"> set to <seealso cref="NorwegianLightStemmer#BOKMAAL"/>,
- /// <seealso cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
+ /// <param name="flags"> set to <see cref="NorwegianLightStemmer#BOKMAAL"/>,
+ /// <see cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
public NorwegianMinimalStemmer(int flags)
{
if (flags <= 0 || flags > NorwegianLightStemmer.BOKMAAL + NorwegianLightStemmer.NYNORSK)
[05/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - <pre> > <code>,
<seealso cref="..."/> > <see cref="..."/>, Analyzer.TokenStreamComponents
Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs
index 4bf284f..b2dc6cf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Path
///
- /// <pre>
+ /// <code>
/// /something/something/else
- /// </pre>
+ /// </code>
///
/// and make:
///
@@ -36,7 +36,7 @@ namespace Lucene.Net.Analysis.Path
/// /something
/// /something/something
/// /something/something/else
- /// </pre>
+ /// </code>
/// </para>
/// </summary>
public class PathHierarchyTokenizer : Tokenizer
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs
index fa14bef..c25239b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Path
*/
/// <summary>
- /// Factory for <seealso cref="PathHierarchyTokenizer"/>.
+ /// Factory for <see cref="PathHierarchyTokenizer"/>.
/// <para>
/// This factory is typically configured for use only in the <code>index</code>
/// Analyzer (or only in the <code>query</code> Analyzer, but never both).
@@ -37,7 +37,7 @@ namespace Lucene.Net.Analysis.Path
/// <code>Books/Fic</code>...
/// </para>
///
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="descendent_path" class="solr.TextField">
/// <analyzer type="index">
/// <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
@@ -46,7 +46,7 @@ namespace Lucene.Net.Analysis.Path
/// <tokenizer class="solr.KeywordTokenizerFactory" />
/// </analyzer>
/// </fieldType>
- /// </pre>
+ /// </code>
/// <para>
/// In this example however we see the oposite configuration, so that a query
/// for <code>Books/NonFic/Science/Physics</code> would match documents
@@ -55,7 +55,7 @@ namespace Lucene.Net.Analysis.Path
/// <code>Books/NonFic/Science/Physics/Theory</code> or
/// <code>Books/NonFic/Law</code>.
/// </para>
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="descendent_path" class="solr.TextField">
/// <analyzer type="index">
/// <tokenizer class="solr.KeywordTokenizerFactory" />
@@ -64,7 +64,7 @@ namespace Lucene.Net.Analysis.Path
/// <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
/// </analyzer>
/// </fieldType>
- /// </pre>
+ /// </code>
/// </summary>
public class PathHierarchyTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
index 0df60fd..8def15d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Path
///
- /// <pre>
+ /// <code>
/// www.site.co.uk
- /// </pre>
+ /// </code>
///
/// and make:
///
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Path
/// site.co.uk
/// co.uk
/// uk
- /// </pre>
+ /// </code>
///
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
index 9887315..a4154db 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Pattern
*/
/// <summary>
- /// Factory for <seealso cref="PatternCaptureGroupTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PatternCaptureGroupTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_ptncapturegroup" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.KeywordTokenizerFactory"/>
/// <filter class="solr.PatternCaptureGroupFilterFactory" pattern="([^a-z])" preserve_original="true"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
/// <seealso cref= PatternCaptureGroupTokenFilter </seealso>
public class PatternCaptureGroupFilterFactory : TokenFilterFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
index 49aaf7e..0f7a367 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Pattern
/// </code>
/// </para>
/// <para>
- /// plus if <seealso cref="#preserveOriginal"/> is true, it would also return
+ /// plus if <see cref="#preserveOriginal"/> is true, it would also return
/// <code>"camelCaseFilter</code>
/// </para>
/// </summary>
@@ -79,12 +79,12 @@ namespace Lucene.Net.Analysis.Pattern
private int currentMatcher;
/// <param name="input">
- /// the input <seealso cref="TokenStream"/> </param>
+ /// the input <see cref="TokenStream"/> </param>
/// <param name="preserveOriginal">
/// set to true to return the original token even if one of the
/// patterns matches </param>
/// <param name="patterns">
- /// an array of <seealso cref="Pattern"/> objects to match against each token </param>
+ /// an array of <see cref="Pattern"/> objects to match against each token </param>
public PatternCaptureGroupTokenFilter(TokenStream input, bool preserveOriginal, params Regex[] patterns) : base(input)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs
index 1d3a987..ffa4121 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.Pattern
*/
/// <summary>
- /// Factory for <seealso cref="PatternReplaceCharFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PatternReplaceCharFilter"/>.
+ /// <code>
/// <fieldType name="text_ptnreplace" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <charFilter class="solr.PatternReplaceCharFilterFactory"
/// pattern="([^a-z])" replacement=""/>
/// <tokenizer class="solr.KeywordTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
///
/// @since Solr 3.1
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs
index 0fdb959..72c0b82 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis.Pattern
/// Constructs an instance to replace either the first, or all occurances
/// </summary>
/// <param name="in"> the TokenStream to process </param>
- /// <param name="pattern"> the pattern (a <seealso cref="Regex"/> object) to apply to each Token </param>
+ /// <param name="pattern"> the pattern (a <see cref="Regex"/> object) to apply to each Token </param>
/// <param name="replacement"> the "replacement string" to substitute, if null a
/// blank string will be used. Note that this is not the literal
/// string that will be used, '$' and '\' have special meaning. </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs
index 50ed216..b0e3253 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.Pattern
*/
/// <summary>
- /// Factory for <seealso cref="PatternReplaceFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PatternReplaceFilter"/>.
+ /// <code>
/// <fieldType name="text_ptnreplace" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.KeywordTokenizerFactory"/>
/// <filter class="solr.PatternReplaceFilterFactory" pattern="([^a-z])" replacement=""
/// replace="all"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
/// <seealso cref= PatternReplaceFilter </seealso>
public class PatternReplaceFilterFactory : TokenFilterFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
index c3fa237..d0f80a8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Pattern
/// <para>
/// group=-1 (the default) is equivalent to "split". In this case, the tokens will
/// be equivalent to the output from (without empty tokens):
- /// <seealso cref="String#split(java.lang.String)"/>
+ /// <see cref="System.Text.RegularExpressions.Regex.Split(string, string)"/>
/// </para>
/// <para>
/// Using group >= 0 selects the matching group as the token. For example, if you have:<br/>
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Pattern
/// pattern = \'([^\']+)\'
/// group = 0
/// input = aaa 'bbb' 'ccc'
- /// </pre>
+ /// </code>
/// the output will be two tokens: 'bbb' and 'ccc' (including the ' marks). With the same input
/// but using group=1, the output would be: bbb and ccc (no ' marks)
/// </para>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
index 26f9be1..033a3d7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Pattern
*/
/// <summary>
- /// Factory for <seealso cref="PatternTokenizer"/>.
+ /// Factory for <see cref="PatternTokenizer"/>.
/// This tokenizer uses regex pattern matching to construct distinct tokens
/// for the input stream. It takes two arguments: "pattern" and "group".
/// <p/>
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.Pattern
/// <para>
/// group=-1 (the default) is equivalent to "split". In this case, the tokens will
/// be equivalent to the output from (without empty tokens):
- /// <seealso cref="String#split(java.lang.String)"/>
+ /// <see cref="System.Text.RegularExpressions.Regex.Split(string, string)"/>
/// </para>
/// <para>
/// Using group >= 0 selects the matching group as the token. For example, if you have:<br/>
@@ -49,7 +49,7 @@ namespace Lucene.Net.Analysis.Pattern
/// </para>
/// <para>NOTE: This Tokenizer does not output tokens that are of zero length.</para>
///
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="text_ptn" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.PatternTokenizerFactory" pattern="\'([^\']+)\'" group="1"/>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs
index 4c0bd2f..1e1b5de 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Payloads
/// For example, if the delimiter is '|', then for the string "foo|bar", foo is the token
/// and "bar" is a payload.
/// <p/>
- /// Note, you can also include a <seealso cref="org.apache.lucene.analysis.payloads.PayloadEncoder"/> to convert the payload in an appropriate way (from characters to bytes).
+ /// Note, you can also include a <see cref="PayloadEncoder"/> to convert the payload in an appropriate way (from characters to bytes).
/// <p/>
/// Note make sure your Tokenizer doesn't split on the delimiter, or this won't work
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs
index cbaf4f4..5db15c9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Factory for <seealso cref="DelimitedPayloadTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="DelimitedPayloadTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_dlmtd" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float" delimiter="|"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class DelimitedPayloadTokenFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs
index 850402f..f3e38af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Encode a character array Float as a <seealso cref="BytesRef"/>.
+ /// Encode a character array Float as a <see cref="BytesRef"/>.
/// <p/> </summary>
/// <seealso cref= org.apache.lucene.analysis.payloads.PayloadHelper#encodeFloat(float, byte[], int)
///
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs
index c108ed5..879279b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Encode a character array Integer as a <seealso cref="BytesRef"/>.
+ /// Encode a character array Integer as a <see cref="BytesRef"/>.
/// <p/>
- /// See <seealso cref="org.apache.lucene.analysis.payloads.PayloadHelper#encodeInt(int, byte[], int)"/>.
+ /// See <see cref="PayloadHelper.EncodeInt(int, byte[], int)"/>.
///
///
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
index 873b077..5d3997d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Assigns a payload to a token based on the <seealso cref="org.apache.lucene.analysis.Token#type()"/>
+ /// Assigns a payload to a token based on the <see cref="Token.Type"/>
///
///
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs
index 0b32784..310ff0a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Factory for <seealso cref="NumericPayloadTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NumericPayloadTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_numpayload" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.NumericPayloadTokenFilterFactory" payload="24" typeMatch="word"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NumericPayloadTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs
index 060569e..3304dc9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Payloads
/// <summary>
/// Mainly for use with the DelimitedPayloadTokenFilter, converts char buffers to
- /// <seealso cref="BytesRef"/>.
+ /// <see cref="BytesRef"/>.
/// <p/>
/// NOTE: This interface is subject to change
///
@@ -32,8 +32,8 @@ namespace Lucene.Net.Analysis.Payloads
BytesRef Encode(char[] buffer);
/// <summary>
- /// Convert a char array to a <seealso cref="BytesRef"/> </summary>
- /// <returns> encoded <seealso cref="BytesRef"/> </returns>
+ /// Convert a char array to a <see cref="BytesRef"/> </summary>
+ /// <returns> encoded <see cref="BytesRef"/> </returns>
BytesRef Encode(char[] buffer, int offset, int length);
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs
index 85f9614..7c861f0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Payloads
}
/// <summary>
- /// Decode the payload that was encoded using <seealso cref="#encodeFloat(float)"/>.
+ /// Decode the payload that was encoded using <see cref="EncodeFloat(float)"/>.
/// NOTE: the length of the array must be at least offset + 4 long. </summary>
/// <param name="bytes"> The bytes to decode </param>
/// <param name="offset"> The offset into the array. </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs
index 9c5b7ff..cffa398 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Adds the <seealso cref="OffsetAttribute#startOffset()"/>
- /// and <seealso cref="OffsetAttribute#endOffset()"/>
+ /// Adds the <see cref="IOffsetAttribute.StartOffset"/>
+ /// and <see cref="IOffsetAttribute.EndOffset"/>
/// First 4 bytes are the start
///
///
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs
index 1f411e2..111feef 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Factory for <seealso cref="TokenOffsetPayloadTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="TokenOffsetPayloadTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_tokenoffset" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.TokenOffsetPayloadTokenFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class TokenOffsetPayloadTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs
index 81868db..2b3b076 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Makes the <seealso cref="org.apache.lucene.analysis.Token#type()"/> a payload.
+ /// Makes the <see cref="Token.Type"/> a payload.
///
- /// Encodes the type using <seealso cref="String#getBytes(String)"/> with "UTF-8" as the encoding
+ /// Encodes the type using <see cref="System.Text.Encoding.GetBytes(string)"/> with "UTF-8" as the encoding
///
///
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs
index 5f14bf6..a990d0a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Factory for <seealso cref="TypeAsPayloadTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="TypeAsPayloadTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_typeaspayload" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.TypeAsPayloadTokenFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class TypeAsPayloadTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs
index 9e103b7..2c1ae97 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Position
/// Set the positionIncrement of all tokens to the "positionIncrement",
/// except the first return token which retains its original positionIncrement value.
/// The default positionIncrement value is zero. </summary>
- /// @deprecated (4.4) PositionFilter makes <seealso cref="TokenStream"/> graphs inconsistent
+ /// @deprecated (4.4) PositionFilter makes <see cref="TokenStream"/> graphs inconsistent
/// which can cause highlighting bugs. Its main use-case being to make
/// <a href="{@docRoot}/../queryparser/overview-summary.html">QueryParser</a>
/// generate boolean queries instead of phrase queries, it is now advised to use
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs
index 2fc4993..dfe4d46 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs
@@ -23,16 +23,16 @@ namespace Lucene.Net.Analysis.Position
*/
/// <summary>
- /// Factory for <seealso cref="PositionFilter"/>.
+ /// Factory for <see cref="PositionFilter"/>.
/// Set the positionIncrement of all tokens to the "positionIncrement", except the first return token which retains its
/// original positionIncrement value. The default positionIncrement value is zero.
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="text_position" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.PositionFilterFactory" positionIncrement="0"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
/// <seealso cref=PositionFilter/>
[Obsolete("(4.4)")]
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
index c33bab5..a51dae7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
@@ -28,11 +28,11 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Portuguese.
+ /// <see cref="Analyzer"/> for Portuguese.
/// <para>
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating PortugueseAnalyzer:
/// <ul>
/// <li> As of 3.6, PortugueseLightStemFilter is used for less aggressive stemming.
@@ -87,7 +87,7 @@ namespace Lucene.Net.Analysis.Pt
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public PortugueseAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -106,7 +106,7 @@ namespace Lucene.Net.Analysis.Pt
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -120,15 +120,15 @@ namespace Lucene.Net.Analysis.Pt
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="PortugueseLightStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="PortugueseLightStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs
index c079281..e557bff 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="PortugueseLightStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="PortugueseLightStemmer"/> to stem
/// Portuguese words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class PortugueseLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs
index a0bf456..12dbdfd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// Factory for <seealso cref="PortugueseLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PortugueseLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_ptlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.PortugueseLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PortugueseLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs
index cfd6e46..6f63d4c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="PortugueseMinimalStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="PortugueseMinimalStemmer"/> to stem
/// Portuguese words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class PortugueseMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs
index 0cb0acd..db1927f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// Factory for <seealso cref="PortugueseMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PortugueseMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_ptminstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.PortugueseMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PortugueseMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs
index 88a58ad..560b64a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="PortugueseStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="PortugueseStemmer"/> to stem
/// Portuguese words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class PortugueseStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs
index 54e6dc8..46a5a67 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// Factory for <seealso cref="PortugueseStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PortugueseStemFilter"/>.
+ /// <code>
/// <fieldType name="text_ptstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.PortugueseStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PortugueseStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
index 39e21c0..8e72225 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Pt
/// <para>
/// Since this time a plural-only modification (RSLP-S) as well as a modification
/// for the Galician language have been implemented. This class parses a configuration
- /// file that describes <seealso cref="Step"/>s, where each Step contains a set of <seealso cref="Rule"/>s.
+ /// file that describes <see cref="Step"/>s, where each Step contains a set of <see cref="Rule"/>s.
/// </para>
/// <para>
/// The general rule format is:
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs
index 991e4ed..1b8b913 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Query
*/
/// <summary>
- /// An <seealso cref="Analyzer"/> used primarily at query time to wrap another analyzer and provide a layer of protection
+ /// An <see cref="Analyzer"/> used primarily at query time to wrap another analyzer and provide a layer of protection
/// which prevents very common words from being passed into queries.
/// <para>
/// For very large indexes the cost
@@ -47,9 +47,9 @@ namespace Lucene.Net.Analysis.Query
/// <summary>
/// Creates a new QueryAutoStopWordAnalyzer with stopwords calculated for all
/// indexed fields from terms with a document frequency percentage greater than
- /// <seealso cref="#defaultMaxDocFreqPercent"/>
+ /// <see cref="defaultMaxDocFreqPercent"/>
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <exception cref="IOException"> Can be thrown while reading from the IndexReader </exception>
@@ -63,7 +63,7 @@ namespace Lucene.Net.Analysis.Query
/// indexed fields from terms with a document frequency greater than the given
/// maxDocFreq
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <param name="maxDocFreq"> Document frequency terms should be above in order to be stopwords </param>
@@ -78,7 +78,7 @@ namespace Lucene.Net.Analysis.Query
/// indexed fields from terms with a document frequency percentage greater than
/// the given maxPercentDocs
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <param name="maxPercentDocs"> The maximum percentage (between 0.0 and 1.0) of index documents which
@@ -94,7 +94,7 @@ namespace Lucene.Net.Analysis.Query
/// given selection of fields from terms with a document frequency percentage
/// greater than the given maxPercentDocs
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <param name="fields"> Selection of fields to calculate stopwords for </param>
@@ -111,7 +111,7 @@ namespace Lucene.Net.Analysis.Query
/// given selection of fields from terms with a document frequency greater than
/// the given maxDocFreq
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <param name="fields"> Selection of fields to calculate stopwords for </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
index 8179914..117be89 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Reverse
/// wildcards search.
/// </para>
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating ReverseStringFilter, or when using any of
/// its static methods:
/// <ul>
@@ -68,27 +68,27 @@ namespace Lucene.Net.Analysis.Reverse
/// <summary>
/// Create a new ReverseStringFilter that reverses all tokens in the
- /// supplied <seealso cref="TokenStream"/>.
+ /// supplied <see cref="TokenStream"/>.
/// <para>
/// The reversed tokens will not be marked.
/// </para>
/// </summary>
/// <param name="matchVersion"> See <a href="#version">above</a> </param>
- /// <param name="in"> <seealso cref="TokenStream"/> to filter </param>
+ /// <param name="in"> <see cref="TokenStream"/> to filter </param>
public ReverseStringFilter(LuceneVersion matchVersion, TokenStream @in) : this(matchVersion, @in, NOMARKER)
{
}
/// <summary>
/// Create a new ReverseStringFilter that reverses and marks all tokens in the
- /// supplied <seealso cref="TokenStream"/>.
+ /// supplied <see cref="TokenStream"/>.
/// <para>
/// The reversed tokens will be prepended (marked) by the <code>marker</code>
/// character.
/// </para>
/// </summary>
/// <param name="matchVersion"> See <a href="#version">above</a> </param>
- /// <param name="in"> <seealso cref="TokenStream"/> to filter </param>
+ /// <param name="in"> <see cref="TokenStream"/> to filter </param>
/// <param name="marker"> A character used to mark reversed tokens </param>
public ReverseStringFilter(LuceneVersion matchVersion, TokenStream @in, char marker) : base(@in)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs
index fd69e30..ee3ce61 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Reverse
*/
/// <summary>
- /// Factory for <seealso cref="ReverseStringFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ReverseStringFilter"/>.
+ /// <code>
/// <fieldType name="text_rvsstr" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ReverseStringFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
///
/// @since solr 1.4
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
index 9dce193..6c0d9d3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Ro
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Romanian.
+ /// <see cref="Analyzer"/> for Romanian.
/// </summary>
public sealed class RomanianAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Ro
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public RomanianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Ro
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Ro
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="TextReader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
index 62ad10a..e62f65b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
@@ -29,14 +29,14 @@ namespace Lucene.Net.Analysis.Ru
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Russian language.
+ /// <see cref="Analyzer"/> for Russian language.
/// <para>
/// Supports an external list of stopwords (words that
/// will not be indexed at all).
/// A default set of stopwords is used unless an alternative list is specified.
/// </para>
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating RussianAnalyzer:
/// <ul>
/// <li> As of 3.1, StandardTokenizer is used, Snowball stemming is done with
@@ -133,14 +133,14 @@ namespace Lucene.Net.Analysis.Ru
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="TextReader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided, and <seealso cref="SnowballFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided, and <see cref="SnowballFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
#pragma warning disable 612, 618
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs
index 15db0f7..1ffa004 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs
@@ -24,20 +24,20 @@ namespace Lucene.Net.Analysis.Ru
*/
/// <summary>
- /// A RussianLetterTokenizer is a <seealso cref="Tokenizer"/> that extends <seealso cref="LetterTokenizer"/>
+ /// A RussianLetterTokenizer is a <see cref="Tokenizer"/> that extends <see cref="LetterTokenizer"/>
/// by also allowing the basic Latin digits 0-9.
/// <para>
/// <a name="version"/>
- /// You must specify the required <seealso cref="Version"/> compatibility when creating
- /// <seealso cref="RussianLetterTokenizer"/>:
+ /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
+ /// <see cref="RussianLetterTokenizer"/>:
/// <ul>
- /// <li>As of 3.1, <seealso cref="CharTokenizer"/> uses an int based API to normalize and
- /// detect token characters. See <seealso cref="CharTokenizer#isTokenChar(int)"/> and
- /// <seealso cref="CharTokenizer#normalize(int)"/> for details.</li>
+ /// <li>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+ /// detect token characters. See <see cref="CharTokenizer.IsTokenChar(int)"/> and
+ /// <see cref="CharTokenizer.Normalize(int)"/> for details.</li>
/// </ul>
/// </para>
/// </summary>
- /// @deprecated (3.1) Use <seealso cref="StandardTokenizer"/> instead, which has the same functionality.
+ /// @deprecated (3.1) Use <see cref="StandardTokenizer"/> instead, which has the same functionality.
/// This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use StandardTokenizer instead, which has the same functionality.")]
public class RussianLetterTokenizer : CharTokenizer
@@ -46,7 +46,7 @@ namespace Lucene.Net.Analysis.Ru
private const int DIGIT_9 = '9';
/// Construct a new RussianLetterTokenizer. * <param name="matchVersion"> Lucene version
- /// to match See <seealso cref="<a href="#version">above</a>"/>
+ /// to match; see <a href="#version">above</a>
/// </param>
/// <param name="in">
/// the input to split up into tokens </param>
@@ -57,12 +57,12 @@ namespace Lucene.Net.Analysis.Ru
/// <summary>
/// Construct a new RussianLetterTokenizer using a given
- /// <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>. * @param
+ /// <see cref="AttributeSource.AttributeFactory"/>. * @param
/// matchVersion Lucene version to match See
- /// <seealso cref="<a href="#version">above</a>"/>
+ /// see <a href="#version">above</a>
/// </summary>
/// <param name="factory">
- /// the attribute factory to use for this <seealso cref="Tokenizer"/> </param>
+ /// the attribute factory to use for this <see cref="Tokenizer"/> </param>
/// <param name="in">
/// the input to split up into tokens </param>
public RussianLetterTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader @in)
@@ -72,7 +72,7 @@ namespace Lucene.Net.Analysis.Ru
/// <summary>
/// Collects only characters which satisfy
- /// <seealso cref="Character#isLetter(int)"/>.
+ /// <see cref="Character.IsLetter(int)"/>.
/// </summary>
protected override bool IsTokenChar(int c)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs
index 20f9142..16b09c2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Ru
* limitations under the License.
*/
- /// @deprecated Use <seealso cref="org.apache.lucene.analysis.standard.StandardTokenizerFactory"/> instead.
+ /// @deprecated Use <see cref="StandardTokenizerFactory"/> instead.
/// This tokenizer has no Russian-specific functionality.
[Obsolete("Use StandardTokenizerFactory instead.")]
public class RussianLetterTokenizerFactory : TokenizerFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs
index 74ff113..66a1599 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Ru
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="RussianLightStemmer"/> to stem Russian
+ /// A <see cref="TokenFilter"/> that applies <see cref="RussianLightStemmer"/> to stem Russian
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class RussianLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs
index 7def611..8073c78 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Ru
*/
/// <summary>
- /// Factory for <seealso cref="RussianLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="RussianLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_rulgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.RussianLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class RussianLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs
index 34f8063..b3634dc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Shingle
*/
/// <summary>
- /// A ShingleAnalyzerWrapper wraps a <seealso cref="ShingleFilter"/> around another <seealso cref="Analyzer"/>.
+ /// A ShingleAnalyzerWrapper wraps a <see cref="ShingleFilter"/> around another <see cref="Analyzer"/>.
/// <para>
/// A shingle is another name for a token based n-gram.
/// </para>
@@ -97,7 +97,7 @@ namespace Lucene.Net.Analysis.Shingle
}
/// <summary>
- /// Wraps <seealso cref="StandardAnalyzer"/>.
+ /// Wraps <see cref="StandardAnalyzer"/>.
/// </summary>
public ShingleAnalyzerWrapper(LuceneVersion matchVersion)
: this(matchVersion, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE)
@@ -105,7 +105,7 @@ namespace Lucene.Net.Analysis.Shingle
}
/// <summary>
- /// Wraps <seealso cref="StandardAnalyzer"/>.
+ /// Wraps <see cref="StandardAnalyzer"/>.
/// </summary>
public ShingleAnalyzerWrapper(LuceneVersion matchVersion, int minShingleSize, int maxShingleSize)
: this(new StandardAnalyzer(matchVersion), minShingleSize, maxShingleSize)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs
index 61348b4..19b07a0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs
@@ -165,7 +165,7 @@ namespace Lucene.Net.Analysis.Shingle
/// <summary>
/// Constructs a ShingleFilter with the specified shingle size from the
- /// <seealso cref="TokenStream"/> <code>input</code>
+ /// <see cref="TokenStream"/> <code>input</code>
/// </summary>
/// <param name="input"> input stream </param>
/// <param name="minShingleSize"> minimum shingle size produced by the filter. </param>
@@ -184,7 +184,7 @@ namespace Lucene.Net.Analysis.Shingle
/// <summary>
/// Constructs a ShingleFilter with the specified shingle size from the
- /// <seealso cref="TokenStream"/> <code>input</code>
+ /// <see cref="TokenStream"/> <code>input</code>
/// </summary>
/// <param name="input"> input stream </param>
/// <param name="maxShingleSize"> maximum shingle size produced by the filter. </param>
@@ -378,7 +378,7 @@ namespace Lucene.Net.Analysis.Shingle
/// <para>Get the next token from the input stream.
/// </para>
/// <para>If the next token has <code>positionIncrement > 1</code>,
- /// <code>positionIncrement - 1</code> <seealso cref="#fillerToken"/>s are
+ /// <code>positionIncrement - 1</code> <see cref="fillerToken"/>s are
/// inserted first.
/// </para>
/// </summary>
@@ -496,10 +496,10 @@ namespace Lucene.Net.Analysis.Shingle
}
/// <summary>
- /// <para>Fills <seealso cref="#inputWindow"/> with input stream tokens, if available,
+ /// <para>Fills <see cref="inputWindow"/> with input stream tokens, if available,
/// shifting to the right if the window was previously full.
/// </para>
- /// <para>Resets <seealso cref="#gramSize"/> to its minimum value.
+ /// <para>Resets <see cref="gramSize"/> to its minimum value.
///
/// </para>
/// </summary>
@@ -570,13 +570,13 @@ namespace Lucene.Net.Analysis.Shingle
/// <summary>
/// <para>An instance of this class is used to maintain the number of input
/// stream tokens that will be used to compose the next unigram or shingle:
- /// <seealso cref="#gramSize"/>.
+ /// <see cref="gramSize"/>.
/// </para>
/// <para><code>gramSize</code> will take on values from the circular sequence
- /// <b>{ [ 1, ] <seealso cref="#minShingleSize"/> [ , ... , <seealso cref="#maxShingleSize"/> ] }</b>.
+ /// <b>{ [ 1, ] <see cref="minShingleSize"/> [ , ... , <see cref="maxShingleSize"/> ] }</b>.
/// </para>
/// <para>1 is included in the circular sequence only if
- /// <seealso cref="#outputUnigrams"/> = true.
+ /// <see cref="#outputUnigrams"/> = true.
/// </para>
/// </summary>
private class CircularSequence
@@ -608,10 +608,10 @@ namespace Lucene.Net.Analysis.Shingle
/// <para>Increments this circular number's value to the next member in the
/// circular sequence
/// <code>gramSize</code> will take on values from the circular sequence
- /// <b>{ [ 1, ] <seealso cref="#minShingleSize"/> [ , ... , <seealso cref="#maxShingleSize"/> ] }</b>.
+ /// <b>{ [ 1, ] <see cref="minShingleSize"/> [ , ... , <see cref="maxShingleSize"/> ] }</b>.
/// </para>
/// <para>1 is included in the circular sequence only if
- /// <seealso cref="#outputUnigrams"/> = true.
+ /// <see cref="outputUnigrams"/> = true.
/// </para>
/// </summary>
public virtual void advance()
@@ -636,10 +636,10 @@ namespace Lucene.Net.Analysis.Shingle
/// circular sequence
/// </para>
/// <para><code>gramSize</code> will take on values from the circular sequence
- /// <b>{ [ 1, ] <seealso cref="#minShingleSize"/> [ , ... , <seealso cref="#maxShingleSize"/> ] }</b>.
+ /// <b>{ [ 1, ] <see cref="minShingleSize"/> [ , ... , <see cref="maxShingleSize"/> ] }</b>.
/// </para>
/// <para>1 is included in the circular sequence only if
- /// <seealso cref="#outputUnigrams"/> = true.
+ /// <see cref="outputUnigrams"/> = true.
/// </para>
/// </summary>
public virtual void reset()
@@ -651,8 +651,8 @@ namespace Lucene.Net.Analysis.Shingle
/// <para>Returns true if the current value is the first member of the circular
/// sequence.
/// </para>
- /// <para>If <seealso cref="#outputUnigrams"/> = true, the first member of the circular
- /// sequence will be 1; otherwise, it will be <seealso cref="#minShingleSize"/>.
+ /// <para>If <see cref="outputUnigrams"/> = true, the first member of the circular
+ /// sequence will be 1; otherwise, it will be <see cref="minShingleSize"/>.
///
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs
index 9dac23f..782fb83 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Shingle
*/
/// <summary>
- /// Factory for <seealso cref="ShingleFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ShingleFilter"/>.
+ /// <code>
/// <fieldType name="text_shingle" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ShingleFilterFactory" minShingleSize="2" maxShingleSize="2"
/// outputUnigrams="true" outputUnigramsIfNoShingles="false" tokenSeparator=" " fillerToken="_"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ShingleFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs
index 3abd14b..bb31ae6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Sinks
*/
/// <summary>
- /// Attempts to parse the <seealso cref="CharTermAttribute.ToString()"/> as a Date using either the
+ /// Attempts to parse the <see cref="CharTermAttribute.ToString()"/> as a Date using either the
/// <see cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/> or
/// <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/> methods.
/// If a format is passed, <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the current culture and <see cref="DateTimeStyles.None"/>.
- /// Loosely matches standard DateTime formats using <seealso cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Loosely matches standard DateTime formats using <see cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
public DateRecognizerSinkFilter()
: this((string[])null, DateTimeFormatInfo.CurrentInfo, DateTimeStyles.None)
@@ -47,7 +47,7 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied culture and <see cref="DateTimeStyles.None"/>.
- /// Loosely matches standard DateTime formats using <seealso cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Loosely matches standard DateTime formats using <see cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
/// <param name="culture">An object that supplies culture-specific format information</param>
public DateRecognizerSinkFilter(IFormatProvider culture)
@@ -56,9 +56,9 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the current culture and <see cref="DateTimeStyles.None"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="format">The allowable format of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="format">The allowable format of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, it must match the format of the date exactly to get a match.</param>
public DateRecognizerSinkFilter(string format)
: this(new string[] { format }, DateTimeFormatInfo.CurrentInfo, DateTimeStyles.None)
@@ -66,9 +66,9 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the current culture and <see cref="DateTimeStyles.None"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="formats">An array of allowable formats of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="formats">An array of allowable formats of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, one of them must match the format of the date exactly to get a match.</param>
public DateRecognizerSinkFilter(string[] formats)
: this(formats, DateTimeFormatInfo.CurrentInfo, DateTimeStyles.None)
@@ -76,21 +76,21 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied culture and <see cref="DateTimeStyles"/>.
- /// Loosely matches standard DateTime formats using <seealso cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Loosely matches standard DateTime formats using <see cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
/// If supplied, one of them must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
/// <param name="style">A bitwise combination of enumeration values that indicates the permitted format of s.
- /// A typical value to specify is <seealso cref="DateTimeStyles.None"/></param>
+ /// A typical value to specify is <see cref="DateTimeStyles.None"/></param>
public DateRecognizerSinkFilter(IFormatProvider culture, DateTimeStyles style)
:this((string[])null, culture, style)
{ }
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied format, culture and <see cref="DateTimeStyles.None"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="format">The allowable format of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="format">The allowable format of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, it must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
public DateRecognizerSinkFilter(string format, IFormatProvider culture)
@@ -99,9 +99,9 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied formats, culture and <see cref="DateTimeStyles.None"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="formats">An array of allowable formats of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="formats">An array of allowable formats of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, one of them must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
public DateRecognizerSinkFilter(string[] formats, IFormatProvider culture)
@@ -110,26 +110,26 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied format, culture and <see cref="DateTimeStyles"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="format">The allowable format of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="format">The allowable format of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, it must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
/// <param name="style">A bitwise combination of enumeration values that indicates the permitted format of s.
- /// A typical value to specify is <seealso cref="DateTimeStyles.None"/></param>
+ /// A typical value to specify is <see cref="DateTimeStyles.None"/></param>
public DateRecognizerSinkFilter(string format, IFormatProvider culture, DateTimeStyles style)
: this(new string[] { format }, culture, style)
{ }
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied formats, culture and <see cref="DateTimeStyles"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="formats">An array of allowable formats of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="formats">An array of allowable formats of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, one of them must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
/// <param name="style">A bitwise combination of enumeration values that indicates the permitted format of s.
- /// A typical value to specify is <seealso cref="DateTimeStyles.None"/></param>
+ /// A typical value to specify is <see cref="DateTimeStyles.None"/></param>
public DateRecognizerSinkFilter(string[] formats, IFormatProvider culture, DateTimeStyles style)
{
this.m_culture = culture;
[04/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - &lt;seealso cref=""/&gt; to &lt;see cref=""/&gt;,
&lt;pre&gt; to &lt;code&gt;, Analyzer.T [subject truncated by archive renderer]
Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
index 2eba697..1538470 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Sinks
/// It is also useful for doing things like entity extraction or proper noun analysis as
/// part of the analysis workflow and saving off those tokens for use in another field.
///
- /// <pre class="prettyprint">
+ /// <code>
/// TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(version, reader1));
/// TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
/// TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
@@ -47,23 +47,23 @@ namespace Lucene.Net.Analysis.Sinks
/// d.add(new TextField("f2", final2, Field.Store.NO));
/// d.add(new TextField("f3", final3, Field.Store.NO));
/// d.add(new TextField("f4", final4, Field.Store.NO));
- /// </pre>
+ /// </code>
/// In this example, <code>sink1</code> and <code>sink2</code> will both get tokens from both
/// <code>reader1</code> and <code>reader2</code> after whitespace tokenizer
/// and now we can further wrap any of these in extra analysis, and more "sources" can be inserted if desired.
/// It is important, that tees are consumed before sinks (in the above example, the field names must be
/// less the sink's field names). If you are not sure, which stream is consumed first, you can simply
- /// add another sink and then pass all tokens to the sinks at once using <seealso cref="#consumeAllTokens"/>.
+ /// add another sink and then pass all tokens to the sinks at once using <see cref="#consumeAllTokens"/>.
/// This TokenFilter is exhausted after this. In the above example, change
/// the example above to:
- /// <pre class="prettyprint">
+ /// <code>
/// ...
/// TokenStream final1 = new LowerCaseFilter(version, source1.newSinkTokenStream());
/// TokenStream final2 = source2.newSinkTokenStream();
/// sink1.consumeAllTokens();
/// sink2.consumeAllTokens();
/// ...
- /// </pre>
+ /// </code>
/// In this case, the fields can be added in any order, because the sources are not used anymore and all sinks are ready.
/// <para>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
/// </para>
@@ -81,7 +81,7 @@ namespace Lucene.Net.Analysis.Sinks
}
/// <summary>
- /// Returns a new <seealso cref="SinkTokenStream"/> that receives all tokens consumed by this stream.
+ /// Returns a new <see cref="SinkTokenStream"/> that receives all tokens consumed by this stream.
/// </summary>
public SinkTokenStream NewSinkTokenStream()
{
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Sinks
}
/// <summary>
- /// Returns a new <seealso cref="SinkTokenStream"/> that receives all tokens consumed by this stream
+ /// Returns a new <see cref="SinkTokenStream"/> that receives all tokens consumed by this stream
/// that pass the supplied filter. </summary>
/// <seealso cref= SinkFilter></seealso>
public SinkTokenStream NewSinkTokenStream(SinkFilter filter)
@@ -100,7 +100,7 @@ namespace Lucene.Net.Analysis.Sinks
}
/// <summary>
- /// Adds a <seealso cref="SinkTokenStream"/> created by another <code>TeeSinkTokenFilter</code>
+ /// Adds a <see cref="SinkTokenStream"/> created by another <code>TeeSinkTokenFilter</code>
/// to this one. The supplied stream will also receive all consumed tokens.
/// This method can be used to pass tokens from two different tees to one sink.
/// </summary>
@@ -174,18 +174,18 @@ namespace Lucene.Net.Analysis.Sinks
}
/// <summary>
- /// A filter that decides which <seealso cref="AttributeSource"/> states to store in the sink.
+ /// A filter that decides which <see cref="AttributeSource"/> states to store in the sink.
/// </summary>
public abstract class SinkFilter
{
/// <summary>
- /// Returns true, iff the current state of the passed-in <seealso cref="AttributeSource"/> shall be stored
+ /// Returns true, iff the current state of the passed-in <see cref="AttributeSource"/> shall be stored
/// in the sink.
/// </summary>
public abstract bool Accept(AttributeSource source);
/// <summary>
- /// Called by <seealso cref="SinkTokenStream#reset()"/>. This method does nothing by default
+ /// Called by <see cref="SinkTokenStream#reset()"/>. This method does nothing by default
/// and can optionally be overridden.
/// </summary>
public virtual void Reset()
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
index 4a4786d..352616a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
@@ -27,17 +27,17 @@ namespace Lucene.Net.Analysis.Snowball
*/
/// <summary>
- /// Filters <seealso cref="StandardTokenizer"/> with <seealso cref="StandardFilter"/>, {@link
- /// LowerCaseFilter}, <seealso cref="StopFilter"/> and <seealso cref="SnowballFilter"/>.
+ /// Filters <see cref="StandardTokenizer"/> with <see cref="StandardFilter"/>, {@link
+ /// LowerCaseFilter}, <see cref="StopFilter"/> and <see cref="SnowballFilter"/>.
///
/// Available stemmers are listed in org.tartarus.snowball.ext. The name of a
/// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
- /// <seealso cref="org.tartarus.snowball.ext.EnglishStemmer"/> is named "English".
+ /// <see cref="org.tartarus.snowball.ext.EnglishStemmer"/> is named "English".
///
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>, with the following addition:
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>, with the following addition:
/// <ul>
- /// <li> As of 3.1, uses <seealso cref="TurkishLowerCaseFilter"/> for Turkish language.
+ /// <li> As of 3.1, uses <see cref="TurkishLowerCaseFilter"/> for Turkish language.
/// </ul>
/// </para> </summary>
/// @deprecated (3.1) Use the language-specific analyzer in modules/analysis instead.
@@ -65,9 +65,9 @@ namespace Lucene.Net.Analysis.Snowball
}
/// <summary>
- /// Constructs a <seealso cref="StandardTokenizer"/> filtered by a {@link
- /// StandardFilter}, a <seealso cref="LowerCaseFilter"/>, a <seealso cref="StopFilter"/>,
- /// and a <seealso cref="SnowballFilter"/>
+ /// Constructs a <see cref="StandardTokenizer"/> filtered by a {@link
+ /// StandardFilter}, a <see cref="LowerCaseFilter"/>, a <see cref="StopFilter"/>,
+ /// and a <see cref="SnowballFilter"/>
/// </summary>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
index ca33892..103bd66 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
@@ -25,22 +25,22 @@ namespace Lucene.Net.Analysis.Snowball
/// <summary>
/// A filter that stems words using a Snowball-generated stemmer.
///
- /// Available stemmers are listed in <seealso cref="org.tartarus.snowball.ext"/>.
+ /// Available stemmers are listed in <see cref="org.tartarus.snowball.ext"/>.
/// <para><b>NOTE</b>: SnowballFilter expects lowercased text.
/// <ul>
- /// <li>For the Turkish language, see <seealso cref="TurkishLowerCaseFilter"/>.
- /// <li>For other languages, see <seealso cref="LowerCaseFilter"/>.
+ /// <li>For the Turkish language, see <see cref="TurkishLowerCaseFilter"/>.
+ /// <li>For other languages, see <see cref="LowerCaseFilter"/>.
/// </ul>
/// </para>
///
/// <para>
- /// Note: This filter is aware of the <seealso cref="KeywordAttribute"/>. To prevent
+ /// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
/// certain terms from being passed to the stemmer
- /// <seealso cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
- /// in a previous <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
+ /// in a previous <see cref="TokenStream"/>.
///
/// Note: For including the original term as well as the stemmed version, see
- /// <seealso cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+ /// <see cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
/// </para>
///
///
@@ -64,9 +64,9 @@ namespace Lucene.Net.Analysis.Snowball
/// <summary>
/// Construct the named stemming filter.
///
- /// Available stemmers are listed in <seealso cref="org.tartarus.snowball.ext"/>.
+ /// Available stemmers are listed in <see cref="org.tartarus.snowball.ext"/>.
/// The name of a stemmer is the part of the class name before "Stemmer",
- /// e.g., the stemmer in <seealso cref="org.tartarus.snowball.ext.EnglishStemmer"/> is named "English".
+ /// e.g., the stemmer in <see cref="org.tartarus.snowball.ext.EnglishStemmer"/> is named "English".
/// </summary>
/// <param name="in"> the input tokens to stem </param>
/// <param name="name"> the name of a stemmer </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs
index 19c199e..707467f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs
@@ -25,17 +25,17 @@ namespace Lucene.Net.Analysis.Snowball
*/
/// <summary>
- /// Factory for <seealso cref="SnowballFilter"/>, with configurable language
+ /// Factory for <see cref="SnowballFilter"/>, with configurable language
/// <para>
/// Note: Use of the "Lovins" stemmer is not recommended, as it is implemented with reflection.
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="text_snowballstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.SnowballPorterFilterFactory" protected="protectedkeyword.txt" language="English"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </para>
/// </summary>
public class SnowballPorterFilterFactory : TokenFilterFactory, IResourceLoaderAware
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
index 7305f56..70aa887 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Filters <seealso cref="ClassicTokenizer"/> with <seealso cref="ClassicFilter"/>, {@link
- /// LowerCaseFilter} and <seealso cref="StopFilter"/>, using a list of
+ /// Filters <see cref="ClassicTokenizer"/> with <see cref="ClassicFilter"/>, {@link
+ /// LowerCaseFilter} and <see cref="StopFilter"/>, using a list of
/// English stop words.
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="LuceneVersion"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating ClassicAnalyzer:
/// <ul>
/// <li> As of 3.1, StopFilter correctly handles Unicode 4.0
@@ -40,7 +40,7 @@ namespace Lucene.Net.Analysis.Standard
/// </ul>
///
/// ClassicAnalyzer was named StandardAnalyzer in Lucene versions prior to 3.1.
- /// As of 3.1, <seealso cref="StandardAnalyzer"/> implements Unicode text segmentation,
+ /// As of 3.1, <see cref="StandardAnalyzer"/> implements Unicode text segmentation,
/// as specified by UAX#29.
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs
index 297c406..c9bd8cf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs
@@ -19,7 +19,7 @@ namespace Lucene.Net.Analysis.Standard
* limitations under the License.
*/
/// <summary>
- /// Normalizes tokens extracted with <seealso cref="ClassicTokenizer"/>. </summary>
+ /// Normalizes tokens extracted with <see cref="ClassicTokenizer"/>. </summary>
public class ClassicFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs
index d310f68..80fac18 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="ClassicFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ClassicFilter"/>.
+ /// <code>
/// <fieldType name="text_clssc" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.ClassicTokenizerFactory"/>
/// <filter class="solr.ClassicFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ClassicFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
index ae4af96..415bdb7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
@@ -40,7 +40,7 @@ namespace Lucene.Net.Analysis.Standard
/// directory to your project and maintaining your own grammar-based tokenizer.
///
/// ClassicTokenizer was named StandardTokenizer in Lucene versions prior to 3.1.
- /// As of 3.1, <seealso cref="StandardTokenizer"/> implements Unicode text segmentation,
+ /// As of 3.1, <see cref="StandardTokenizer"/> implements Unicode text segmentation,
/// as specified by UAX#29.
/// </para>
/// </summary>
@@ -92,7 +92,7 @@ namespace Lucene.Net.Analysis.Standard
/// <summary>
- /// Creates a new instance of the <seealso cref="ClassicTokenizer"/>. Attaches
+ /// Creates a new instance of the <see cref="ClassicTokenizer"/>. Attaches
/// the <code>input</code> to the newly created JFlex scanner.
/// </summary>
/// <param name="input"> The input reader
@@ -105,7 +105,7 @@ namespace Lucene.Net.Analysis.Standard
}
/// <summary>
- /// Creates a new ClassicTokenizer with a given <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
+ /// Creates a new ClassicTokenizer with a given <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
/// </summary>
public ClassicTokenizer(LuceneVersion matchVersion, AttributeFactory factory, Reader input)
: base(factory, input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs
index 4f895ab..079e824 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="ClassicTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ClassicTokenizer"/>.
+ /// <code>
/// <fieldType name="text_clssc" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.ClassicTokenizerFactory" maxTokenLength="120"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ClassicTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
index 5a31f16..5770b55 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Filters <seealso cref="StandardTokenizer"/> with <seealso cref="StandardFilter"/>, {@link
- /// LowerCaseFilter} and <seealso cref="StopFilter"/>, using a list of
+ /// Filters <see cref="StandardTokenizer"/> with <see cref="StandardFilter"/>, {@link
+ /// LowerCaseFilter} and <see cref="StopFilter"/>, using a list of
/// English stop words.
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="LuceneVersion"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating StandardAnalyzer:
/// <ul>
/// <li> As of 3.4, Hiragana and Han characters are no longer wrongly split
@@ -36,7 +36,7 @@ namespace Lucene.Net.Analysis.Standard
/// you get the exact broken behavior for backwards compatibility.
/// <li> As of 3.1, StandardTokenizer implements Unicode text segmentation,
/// and StopFilter correctly handles Unicode 4.0 supplementary characters
- /// in stopwords. <seealso cref="ClassicTokenizer"/> and <seealso cref="ClassicAnalyzer"/>
+ /// in stopwords. <see cref="ClassicTokenizer"/> and <see cref="ClassicAnalyzer"/>
/// are the pre-3.1 implementations of StandardTokenizer and
/// StandardAnalyzer.
/// <li> As of 2.9, StopFilter preserves position increments
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs
index 5513468..0c78b06 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Normalizes tokens extracted with <seealso cref="StandardTokenizer"/>.
+ /// Normalizes tokens extracted with <see cref="StandardTokenizer"/>.
/// </summary>
public class StandardFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs
index d968bc4..5476920 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="StandardFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="StandardFilter"/>.
+ /// <code>
/// <fieldType name="text_stndrd" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.StandardFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class StandardFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
index 37f2ac7..deae880 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Standard
///
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating StandardTokenizer:
/// <ul>
/// <li> As of 3.4, Hiragana and Han characters are no longer wrongly split
@@ -49,7 +49,7 @@ namespace Lucene.Net.Analysis.Standard
/// you get the exact broken behavior for backwards compatibility.
/// <li> As of 3.1, StandardTokenizer implements Unicode text segmentation.
/// If you use a previous version number, you get the exact behavior of
- /// <seealso cref="ClassicTokenizer"/> for backwards compatibility.
+ /// <see cref="ClassicTokenizer"/> for backwards compatibility.
/// </ul>
/// </para>
/// </summary>
@@ -119,7 +119,7 @@ namespace Lucene.Net.Analysis.Standard
/// <summary>
- /// Creates a new instance of the <seealso cref="StandardTokenizer"/>. Attaches
+ /// Creates a new instance of the <see cref="StandardTokenizer"/>. Attaches
/// the <code>input</code> to the newly created JFlex scanner.
/// </summary>
/// <param name="input"> The input reader
@@ -132,7 +132,7 @@ namespace Lucene.Net.Analysis.Standard
}
/// <summary>
- /// Creates a new StandardTokenizer with a given <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
+ /// Creates a new StandardTokenizer with a given <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
/// </summary>
public StandardTokenizer(Version matchVersion, AttributeFactory factory, Reader input)
: base(factory, input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs
index f933316..cbc5915 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="StandardTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="StandardTokenizer"/>.
+ /// <code>
/// <fieldType name="text_stndrd" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory" maxTokenLength="255"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class StandardTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs
index d15a349..c250996 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Standard
/// Resumes scanning until the next regular expression is matched,
/// the end of input is encountered or an I/O-Error occurs.
/// </summary>
- /// <returns> the next token, <seealso cref="#YYEOF"/> on end of stream </returns>
+ /// <returns> the next token, <see cref="#YYEOF"/> on end of stream </returns>
/// <exception cref="IOException"> if any I/O-Error occurs </exception>
int GetNextToken();
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs
index 9ff6b74..502b98c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Filters <seealso cref="org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer"/>
- /// with <seealso cref="StandardFilter"/>,
- /// <seealso cref="LowerCaseFilter"/> and
- /// <seealso cref="StopFilter"/>, using a list of
+ /// Filters <see cref="org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer"/>
+ /// with <see cref="StandardFilter"/>,
+ /// <see cref="LowerCaseFilter"/> and
+ /// <see cref="StopFilter"/>, using a list of
/// English stop words.
///
/// <a name="version"/>
/// <para>
- /// You must specify the required <seealso cref="org.apache.lucene.util.Version"/>
+ /// You must specify the required <see cref="org.apache.lucene.util.Version"/>
/// compatibility when creating UAX29URLEmailAnalyzer
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
index 0821678..2c91236 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
@@ -44,7 +44,7 @@ namespace Lucene.Net.Analysis.Standard
/// <li><HIRAGANA>: A single hiragana character</li>
/// </ul>
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating UAX29URLEmailTokenizer:
/// <ul>
/// <li> As of 3.4, Hiragana and Han characters are no longer wrongly split
@@ -110,7 +110,7 @@ namespace Lucene.Net.Analysis.Standard
}
/// <summary>
- /// Creates a new UAX29URLEmailTokenizer with a given <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
+ /// Creates a new UAX29URLEmailTokenizer with a given <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
/// </summary>
public UAX29URLEmailTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader input)
: base(factory, input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs
index 9375c3a..dc902f8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs
@@ -24,13 +24,13 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="UAX29URLEmailTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="UAX29URLEmailTokenizer"/>.
+ /// <code>
/// <fieldType name="text_urlemail" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.UAX29URLEmailTokenizerFactory" maxTokenLength="255"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class UAX29URLEmailTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
index c1fefe3..7a334a7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Sv
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Swedish.
+ /// <see cref="Analyzer"/> for Swedish.
/// </summary>
public sealed class SwedishAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Sv
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public SwedishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Sv
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Sv
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs
index 32bb191..356469e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.Sv
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="SwedishLightStemmer"/> to stem Swedish
+ /// A <see cref="TokenFilter"/> that applies <see cref="SwedishLightStemmer"/> to stem Swedish
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class SwedishLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs
index 213154a..11ca0ab 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Sv
*/
/// <summary>
- /// Factory for <seealso cref="SwedishLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="SwedishLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.SwedishLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class SwedishLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs
index 6aaa25f..2b09a51 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Synonym
private SynonymMap map;
- [Obsolete(@"(3.4) use <seealso cref=""SynonymFilterFactory"" instead. this is only a backwards compatibility")]
+ [Obsolete(@"(3.4) use <see cref=""SynonymFilterFactory""/> instead. this is only a backwards compatibility")]
public FSTSynonymFilterFactory(IDictionary<string, string> args)
: base(args)
{
@@ -122,7 +122,7 @@ namespace Lucene.Net.Analysis.Synonym
}
/// <summary>
- /// Load synonyms with the given <seealso cref="SynonymMap.Parser"/> class.
+ /// Load synonyms with the given <see cref="SynonymMap.Parser"/> class.
/// </summary>
private SynonymMap LoadSynonyms(IResourceLoader loader, string cname, bool dedup, Analyzer analyzer)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs
index 0a8ee7e..b9b7eb0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Synonym
/// Generated synonyms will start at the same position as the first matched source token.
/// </para>
/// </summary>
- /// @deprecated (3.4) use <seealso cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
+ /// @deprecated (3.4) use <see cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
[Obsolete("(3.4) use <seealso cref=\"SynonymFilterFactory\"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0")]
internal sealed class SlowSynonymFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs
index f53f978..7d51320 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Synonym
*/
/// <summary>
- /// Factory for <seealso cref="SlowSynonymFilter"/> (only used with luceneMatchVersion < 3.4)
+ /// Factory for <see cref="SlowSynonymFilter"/> (only used with luceneMatchVersion < 3.4)
/// <pre class="prettyprint" >
/// <fieldType name="text_synonym" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
@@ -38,8 +38,8 @@ namespace Lucene.Net.Analysis.Synonym
/// <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="false"
/// expand="true" tokenizerFactory="solr.WhitespaceTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre> </summary>
- /// @deprecated (3.4) use <seealso cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
+ /// </fieldType></code> </summary>
+ /// @deprecated (3.4) use <see cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
[Obsolete("(3.4) use <seealso cref=\"SynonymFilterFactory\"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0")]
internal sealed class SlowSynonymFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs
index 5f3cff9..178618c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs
@@ -27,8 +27,8 @@ namespace Lucene.Net.Analysis.Synonym
*/
/// <summary>
- /// Mapping rules for use with <seealso cref="SlowSynonymFilter"/> </summary>
- /// @deprecated (3.4) use <seealso cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
+ /// Mapping rules for use with <see cref="SlowSynonymFilter"/> </summary>
+ /// @deprecated (3.4) use <see cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
[Obsolete("(3.4) use SynonymFilterFactory instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0")]
internal class SlowSynonymMap
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
index 6be6319..112c7fa 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
@@ -40,7 +40,7 @@ namespace Lucene.Net.Analysis.Synonym
/// a -> x
/// a b -> y
/// b c d -> z
- /// </pre>
+ /// </code>
///
/// Then input <code>a b c d e</code> parses to <code>y b c
/// d</code>, ie the 2nd rule "wins" because it started
@@ -265,9 +265,9 @@ namespace Lucene.Net.Analysis.Synonym
/// <param name="input"> input tokenstream </param>
/// <param name="synonyms"> synonym map </param>
- /// <param name="ignoreCase"> case-folds input for matching with <seealso cref="Character#toLowerCase(int)"/>.
+ /// <param name="ignoreCase"> case-folds input for matching with <see cref="Character#toLowerCase(int)"/>.
/// Note, if you set this to true, its your responsibility to lowercase
- /// the input entries when you create the <seealso cref="SynonymMap"/> </param>
+ /// the input entries when you create the <see cref="SynonymMap"/> </param>
public SynonymFilter(TokenStream input, SynonymMap synonyms, bool ignoreCase) : base(input)
{
termAtt = AddAttribute<ICharTermAttribute>();
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
index ce209bb..4c05334 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Synonym
*/
/// <summary>
- /// Factory for <seealso cref="SynonymFilter"/>.
+ /// Factory for <see cref="SynonymFilter"/>.
/// <pre class="prettyprint" >
/// <fieldType name="text_synonym" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Synonym
/// tokenizerFactory="solr.WhitespaceTokenizerFactory"
/// [optional tokenizer factory parameters]/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
///
/// <para>
/// An optional param name prefix of "tokenizerFactory." may be used for any
@@ -44,14 +44,14 @@ namespace Lucene.Net.Analysis.Synonym
/// </para>
/// <para>
/// The optional {@code format} parameter controls how the synonyms will be parsed:
- /// It supports the short names of {@code solr} for <seealso cref="SolrSynonymParser"/>
- /// and {@code wordnet} for and <seealso cref="WordnetSynonymParser"/>, or your own
+ /// It supports the short names of {@code solr} for <see cref="SolrSynonymParser"/>
+ /// and {@code wordnet} for and <see cref="WordnetSynonymParser"/>, or your own
/// {@code SynonymMap.Parser} class name. The default is {@code solr}.
- /// A custom <seealso cref="SynonymMap.Parser"/> is expected to have a constructor taking:
+ /// A custom <see cref="SynonymMap.Parser"/> is expected to have a constructor taking:
/// <ul>
/// <li><code>boolean dedup</code> - true if duplicates should be ignored, false otherwise</li>
/// <li><code>boolean expand</code> - true if conflation groups should be expanded, false if they are one-directional</li>
- /// <li><code><seealso cref="Analyzer"/> analyzer</code> - an analyzer used for each raw synonym</li>
+ /// <li><code><see cref="Analyzer"/> analyzer</code> - an analyzer used for each raw synonym</li>
/// </ul>
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
index e471cc3..ca9e038 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
@@ -241,7 +241,7 @@ namespace Lucene.Net.Analysis.Synonym
}
/// <summary>
- /// Builds an <seealso cref="SynonymMap"/> and returns it.
+ /// Builds an <see cref="SynonymMap"/> and returns it.
/// </summary>
public virtual SynonymMap Build()
{
@@ -349,13 +349,13 @@ namespace Lucene.Net.Analysis.Synonym
}
/// <summary>
- /// Parse the given input, adding synonyms to the inherited <seealso cref="Builder"/>. </summary>
+ /// Parse the given input, adding synonyms to the inherited <see cref="Builder"/>. </summary>
/// <param name="in"> The input to parse </param>
public abstract void Parse(TextReader @in);
/// <summary>
/// Sugar: analyzes the text with the analyzer and
- /// separates by <seealso cref="SynonymMap#WORD_SEPARATOR"/>.
+ /// separates by <see cref="SynonymMap.WORD_SEPARATOR"/>.
/// reuse and its chars must not be null.
/// </summary>
public virtual CharsRef Analyze(string text, CharsRef reuse)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
index 7569796..f143f90 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Thai language. It uses <seealso cref="java.text.BreakIterator"/> to break words.
+ /// <see cref="Analyzer"/> for Thai language. It uses <see cref="java.text.BreakIterator"/> to break words.
/// <para>
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating ThaiAnalyzer:
/// <ul>
/// <li> As of 3.6, a set of Thai stopwords is used by default
@@ -108,13 +108,13 @@ namespace Lucene.Net.Analysis.Th
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="ThaiWordFilter"/>, and
- /// <seealso cref="StopFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="ThaiWordFilter"/>, and
+ /// <see cref="StopFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
if (m_matchVersion.OnOrAfter(LuceneVersion.LUCENE_48))
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs
index ae3ab1a..21287f2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// Tokenizer that use <seealso cref="BreakIterator"/> to tokenize Thai text.
+ /// Tokenizer that use <see cref="BreakIterator"/> to tokenize Thai text.
/// <para>WARNING: this tokenizer may not be supported by all JREs.
/// It is known to work with Sun/Oracle and Harmony JREs.
/// If your application needs to be fully portable, consider using ICUTokenizer instead,
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs
index 8dc16a8..67a1388 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// Factory for <seealso cref="ThaiTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ThaiTokenizer"/>.
+ /// <code>
/// <fieldType name="text_thai" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.ThaiTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ThaiTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs
index d55733a..8387639 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs
@@ -28,10 +28,10 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// <seealso cref="TokenFilter"/> that use <seealso cref="java.text.BreakIterator"/> to break each
+ /// <see cref="TokenFilter"/> that use <see cref="java.text.BreakIterator"/> to break each
/// Token that is Thai into separate Token(s) for each Thai word.
/// <para>Please note: Since matchVersion 3.1 on, this filter no longer lowercases non-thai text.
- /// <seealso cref="ThaiAnalyzer"/> will insert a <seealso cref="LowerCaseFilter"/> before this filter
+ /// <see cref="ThaiAnalyzer"/> will insert a <see cref="LowerCaseFilter"/> before this filter
/// so the behaviour of the Analyzer does not change. With version 3.1, the filter handles
/// position increments correctly.
/// </para>
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Th
/// which uses an ICU Thai BreakIterator that will always be available.
/// </para>
/// </summary>
- /// @deprecated Use <seealso cref="ThaiTokenizer"/> instead.
+ /// @deprecated Use <see cref="ThaiTokenizer"/> instead.
[Obsolete("Use ThaiTokenizer instead.")]
public sealed class ThaiWordFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs
index 18d19b8..6b289f9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// Factory for <seealso cref="ThaiWordFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ThaiWordFilter"/>.
+ /// <code>
/// <fieldType name="text_thai" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.ThaiWordFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre> </summary>
- /// @deprecated Use <seealso cref="ThaiTokenizerFactory"/> instead
+ /// </fieldType></code> </summary>
+ /// @deprecated Use <see cref="ThaiTokenizerFactory"/> instead
[Obsolete("Use ThaiTokenizerFactory instead")]
public class ThaiWordFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs
index a4cd69b..198c382 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Tr
*/
/// <summary>
- /// Factory for <seealso cref="ApostropheFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ApostropheFilter"/>.
+ /// <code>
/// <fieldType name="text_tr_lower_apostrophes" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.ApostropheFilterFactory"/>
/// <filter class="solr.TurkishLowerCaseFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ApostropheFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
index c54966a..93f08c4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Tr
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Turkish.
+ /// <see cref="Analyzer"/> for Turkish.
/// </summary>
public sealed class TurkishAnalyzer : StopwordAnalyzerBase
{
@@ -84,7 +84,7 @@ namespace Lucene.Net.Analysis.Tr
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public TurkishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -103,7 +103,7 @@ namespace Lucene.Net.Analysis.Tr
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -116,15 +116,15 @@ namespace Lucene.Net.Analysis.Tr
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="TurkishLowerCaseFilter"/>,
- /// <seealso cref="StopFilter"/>, <seealso cref="SetKeywordMarkerFilter"/> if a stem
- /// exclusion set is provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="TurkishLowerCaseFilter"/>,
+ /// <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem
+ /// exclusion set is provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs
index cab7d5d..486b2c0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Tr
*/
/// <summary>
- /// Factory for <seealso cref="TurkishLowerCaseFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="TurkishLowerCaseFilter"/>.
+ /// <code>
/// <fieldType name="text_trlwr" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.TurkishLowerCaseFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class TurkishLowerCaseFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
index 411924b..1eebb02 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
@@ -28,13 +28,13 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Abstract parent class for analysis factories <seealso cref="TokenizerFactory"/>,
- /// <seealso cref="TokenFilterFactory"/> and <seealso cref="CharFilterFactory"/>.
+ /// Abstract parent class for analysis factories <see cref="TokenizerFactory"/>,
+ /// <see cref="TokenFilterFactory"/> and <see cref="CharFilterFactory"/>.
/// <para>
/// The typical lifecycle for a factory consumer is:
/// <ol>
/// <li>Create factory via its constructor (or via XXXFactory.forName)</li>
- /// <li>(Optional) If the factory uses resources such as files, <seealso cref="ResourceLoaderAware#inform(ResourceLoader)"/> is called to initialize those resources.</li>
+ /// <li>(Optional) If the factory uses resources such as files, <see cref="IResourceLoaderAware.Inform(IResourceLoader)"/> is called to initialize those resources.</li>
/// <li>Consumer calls create() to obtain instances.</li>
/// </ol>
/// </para>
@@ -75,9 +75,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// this method can be called in the <seealso cref="TokenizerFactory#create(java.io.Reader)"/>
- /// or <seealso cref="TokenFilterFactory#create(org.apache.lucene.analysis.TokenStream)"/> methods,
- /// to inform user, that for this factory a <seealso cref="#luceneMatchVersion"/> is required
+ /// This method can be called in the <see cref="TokenizerFactory.Create(System.IO.TextReader)"/>
+ /// or <see cref="TokenFilterFactory.Create(TokenStream)"/> methods,
+ /// to inform the user that for this factory a <see cref="m_luceneMatchVersion"/> is required
/// </summary>
protected internal void AssureMatchVersion()
{
@@ -299,7 +299,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Returns as <seealso cref="CharArraySet"/> from wordFiles, which
+ /// Returns a <see cref="CharArraySet"/> from wordFiles, which
/// can be a comma-separated list of filenames
/// </summary>
protected internal CharArraySet GetWordSet(IResourceLoader loader, string wordFiles, bool ignoreCase)
@@ -330,7 +330,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// same as <seealso cref="#getWordSet(ResourceLoader, String, boolean)"/>,
+ /// Same as <see cref="GetWordSet(IResourceLoader, string, bool)"/>,
/// except the input is in snowball format.
/// </summary>
protected internal CharArraySet GetSnowballWordSet(IResourceLoader loader, string wordFiles, bool ignoreCase)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs
index 03b949d..3ba2f08 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs
@@ -49,9 +49,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Reloads the internal SPI list from the given <seealso cref="ClassLoader"/>.
+ /// Reloads the internal SPI list from the given <see cref="ClassLoader"/>.
/// Changes to the service list are visible after the method ends, all
- /// iterators (<seealso cref="#iterator()"/>,...) stay consistent.
+ /// iterators (<see cref="#iterator()"/>,...) stay consistent.
///
/// <p><b>NOTE:</b> Only new service providers are added, existing ones are
/// never removed or replaced.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs
index 9905f99..e50e87e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// A CharacterIterator used internally for use with <seealso cref="BreakIterator"/>
+ /// A CharacterIterator used internally for use with <see cref="BreakIterator"/>
/// @lucene.internal
/// </summary>
public abstract class CharArrayIterator : CharacterIterator
@@ -170,7 +170,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Create a new CharArrayIterator that works around JRE bugs
- /// in a manner suitable for <seealso cref="BreakIterator#getSentenceInstance()"/>
+ /// in a manner suitable for <see cref="BreakIterator#getSentenceInstance()"/>
/// </summary>
public static CharArrayIterator NewSentenceInstance()
{
@@ -188,7 +188,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Create a new CharArrayIterator that works around JRE bugs
- /// in a manner suitable for <seealso cref="BreakIterator#getWordInstance()"/>
+ /// in a manner suitable for <see cref="BreakIterator#getWordInstance()"/>
/// </summary>
public static CharArrayIterator NewWordInstance()
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
index ac5edfa..5cade2d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
@@ -2017,7 +2017,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Empty <seealso cref="CharArrayMap{V}.UnmodifiableCharArrayMap"/> optimized for speed.
+ /// Empty <see cref="CharArrayMap{V}.UnmodifiableCharArrayMap"/> optimized for speed.
/// Contains checks will always return <code>false</code> or throw
/// NPE if necessary.
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
index e968afb..a6a1efe 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Abstract parent class for analysis factories that create <seealso cref="CharFilter"/>
+ /// Abstract parent class for analysis factories that create <see cref="CharFilter"/>
/// instances.
/// </summary>
public abstract class CharFilterFactory : AbstractAnalysisFactory
@@ -52,9 +52,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Reloads the factory list from the given <seealso cref="ClassLoader"/>.
+ /// Reloads the factory list from the given <see cref="ClassLoader"/>.
/// Changes to the factories are visible after the method ends, all
- /// iterators (<seealso cref="#availableCharFilters()"/>,...) stay consistent.
+ /// iterators (<see cref="#availableCharFilters()"/>,...) stay consistent.
///
/// <para><b>NOTE:</b> Only new factories are added, existing ones are
/// never removed or replaced.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
index daf5683..58cc255 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
@@ -26,37 +26,37 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// An abstract base class for simple, character-oriented tokenizers.
/// <para>
- /// <a name="version">You must specify the required <seealso cref="LuceneVersion"/> compatibility
- /// when creating <seealso cref="CharTokenizer"/>:
+ /// <a name="version">You must specify the required <see cref="LuceneVersion"/> compatibility
+ /// when creating <see cref="CharTokenizer"/>:
/// <ul>
- /// <li>As of 3.1, <seealso cref="CharTokenizer"/> uses an int based API to normalize and
- /// detect token codepoints. See <seealso cref="#isTokenChar(int)"/> and
- /// <seealso cref="#normalize(int)"/> for details.</li>
+ /// <li>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+ /// detect token codepoints. See <see cref="IsTokenChar(int)"/> and
+ /// <see cref="Normalize(int)"/> for details.</li>
/// </ul>
/// </para>
/// <para>
- /// A new <seealso cref="CharTokenizer"/> API has been introduced with Lucene 3.1. This API
+ /// A new <see cref="CharTokenizer"/> API has been introduced with Lucene 3.1. This API
/// moved from UTF-16 code units to UTF-32 codepoints to eventually add support
/// for <a href=
/// "http://java.sun.com/j2se/1.5.0/docs/api/java/lang/Character.html#supplementary"
/// >supplementary characters</a>. The old <i>char</i> based API has been
/// deprecated and should be replaced with the <i>int</i> based methods
- /// <seealso cref="#isTokenChar(int)"/> and <seealso cref="#normalize(int)"/>.
+ /// <see cref="IsTokenChar(int)"/> and <see cref="Normalize(int)"/>.
/// </para>
/// <para>
- /// As of Lucene 3.1 each <seealso cref="CharTokenizer"/> - constructor expects a
- /// <seealso cref="LuceneVersion"/> argument. Based on the given <seealso cref="LuceneVersion"/> either the new
+ /// As of Lucene 3.1 each <see cref="CharTokenizer"/> - constructor expects a
+ /// <see cref="LuceneVersion"/> argument. Based on the given <see cref="LuceneVersion"/> either the new
/// API or a backwards compatibility layer is used at runtime. For
- /// <seealso cref="LuceneVersion"/> < 3.1 the backwards compatibility layer ensures correct
+ /// <see cref="LuceneVersion"/> < 3.1 the backwards compatibility layer ensures correct
/// behavior even for indexes build with previous versions of Lucene. If a
- /// <seealso cref="LuceneVersion"/> >= 3.1 is used <seealso cref="CharTokenizer"/> requires the new API to
+ /// <see cref="LuceneVersion"/> >= 3.1 is used <see cref="CharTokenizer"/> requires the new API to
/// be implemented by the instantiated class. Yet, the old <i>char</i> based API
/// is not required anymore even if backwards compatibility must be preserved.
- /// <seealso cref="CharTokenizer"/> subclasses implementing the new API are fully backwards
- /// compatible if instantiated with <seealso cref="LuceneVersion"/> < 3.1.
+ /// <see cref="CharTokenizer"/> subclasses implementing the new API are fully backwards
+ /// compatible if instantiated with <see cref="LuceneVersion"/> < 3.1.
/// </para>
/// <para>
- /// <strong>Note:</strong> If you use a subclass of <seealso cref="CharTokenizer"/> with <seealso cref="LuceneVersion"/> >=
+ /// <strong>Note:</strong> If you use a subclass of <see cref="CharTokenizer"/> with <see cref="LuceneVersion"/> >=
/// 3.1 on an index build with a version < 3.1, created tokens might not be
/// compatible with the terms in your index.
/// </para>
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.Util
public abstract class CharTokenizer : Tokenizer
{
/// <summary>
- /// Creates a new <seealso cref="CharTokenizer"/> instance
+ /// Creates a new <see cref="CharTokenizer"/> instance
/// </summary>
/// <param name="matchVersion">
/// Lucene version to match </param>
@@ -78,12 +78,12 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Creates a new <seealso cref="CharTokenizer"/> instance
+ /// Creates a new <see cref="CharTokenizer"/> instance
/// </summary>
/// <param name="matchVersion">
/// Lucene version to match </param>
/// <param name="factory">
- /// the attribute factory to use for this <seealso cref="Tokenizer"/> </param>
+ /// the attribute factory to use for this <see cref="Tokenizer"/> </param>
/// <param name="input">
/// the input to split up into tokens </param>
protected CharTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
index c4475a5..3d8801d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
@@ -25,9 +25,9 @@ namespace Lucene.Net.Analysis.Util
* limitations under the License.
*/
/// <summary>
- /// <seealso cref="CharacterUtils"/> provides a unified interface to Character-related
+ /// <see cref="CharacterUtils"/> provides a unified interface to Character-related
/// operations to implement backwards compatible character operations based on a
- /// <seealso cref="LuceneVersion"/> instance.
+ /// <see cref="LuceneVersion"/> instance.
///
/// @lucene.internal
/// </summary>
@@ -37,13 +37,13 @@ namespace Lucene.Net.Analysis.Util
private static readonly CharacterUtils JAVA_5 = new Java5CharacterUtils();
/// <summary>
- /// Returns a <seealso cref="CharacterUtils"/> implementation according to the given
- /// <seealso cref="LuceneVersion"/> instance.
+ /// Returns a <see cref="CharacterUtils"/> implementation according to the given
+ /// <see cref="LuceneVersion"/> instance.
/// </summary>
/// <param name="matchVersion">
/// a version instance </param>
- /// <returns> a <seealso cref="CharacterUtils"/> implementation according to the given
- /// <seealso cref="LuceneVersion"/> instance. </returns>
+ /// <returns> a <see cref="CharacterUtils"/> implementation according to the given
+ /// <see cref="LuceneVersion"/> instance. </returns>
public static CharacterUtils GetInstance(LuceneVersion matchVersion)
{
#pragma warning disable 612, 618
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Return a <seealso cref="CharacterUtils"/> instance compatible with Java 1.4. </summary>
+ /// Return a <see cref="CharacterUtils"/> instance compatible with Java 1.4. </summary>
public static CharacterUtils Java4Instance
{
get
@@ -62,10 +62,10 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Returns the code point at the given index of the <seealso cref="CharSequence"/>.
- /// Depending on the <seealso cref="LuceneVersion"/> passed to
- /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
- /// of <seealso cref="Character#codePointAt(char[], int)"/> as it would have been
+ /// Returns the code point at the given index of the <see cref="CharSequence"/>.
+ /// Depending on the <see cref="LuceneVersion"/> passed to
+ /// <see cref="GetInstance(LuceneVersion)"/> this method mimics the behavior
+ /// of <see cref="Character.CodePointAt(char[], int)"/> as it would have been
/// available on a Java 1.4 JVM or on a later virtual machine version.
/// </summary>
/// <param name="seq">
@@ -85,9 +85,9 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Returns the code point at the given index of the char array where only elements
/// with index less than the limit are used.
- /// Depending on the <seealso cref="LuceneVersion"/> passed to
- /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
- /// of <seealso cref="Character#codePointAt(char[], int)"/> as it would have been
+ /// Depending on the <see cref="LuceneVersion"/> passed to
+ /// <see cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
+ /// of <see cref="Character#codePointAt(char[], int)"/> as it would have been
/// available on a Java 1.4 JVM or on a later virtual machine version.
/// </summary>
/// <param name="chars">
@@ -110,12 +110,12 @@ namespace Lucene.Net.Analysis.Util
public abstract int CodePointCount(string seq);
/// <summary>
- /// Creates a new <seealso cref="CharacterBuffer"/> and allocates a <code>char[]</code>
+ /// Creates a new <see cref="CharacterBuffer"/> and allocates a <code>char[]</code>
/// of the given bufferSize.
/// </summary>
/// <param name="bufferSize">
/// the internal char buffer size, must be <code>>= 2</code> </param>
- /// <returns> a new <seealso cref="CharacterBuffer"/> instance. </returns>
+ /// <returns> a new <see cref="CharacterBuffer"/> instance. </returns>
public static CharacterBuffer NewCharacterBuffer(int bufferSize)
{
if (bufferSize < 2)
@@ -127,7 +127,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
- /// Converts each unicode codepoint to lowerCase via <seealso cref="Character#toLowerCase(int)"/> starting
+ /// Converts each unicode codepoint to lowerCase via <see cref="Character#toLowerCase(int)"/> starting
/// at the given offset. </summary>
/// <param name="buffer"> the char buffer to lowercase </param>
/// <param name="offset"> the offset to start at </param>
@@ -145,7 +145,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Converts each unicode codepoint to UpperCase via <seealso cref="Character#toUpperCase(int)"/> starting
+ /// Converts each unicode codepoint to UpperCase via <see cref="Character#toUpperCase(int)"/> starting
/// at the given offset. </summary>
/// <param name="buffer"> the char buffer to UPPERCASE </param>
/// <param name="offset"> the offset to start at </param>
@@ -200,20 +200,20 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Fills the <seealso cref="CharacterBuffer"/> with characters read from the given
- /// reader <seealso cref="Reader"/>. This method tries to read <code>numChars</code>
- /// characters into the <seealso cref="CharacterBuffer"/>, each call to fill will start
+ /// Fills the <see cref="CharacterBuffer"/> with characters read from the given
+ /// reader <see cref="Reader"/>. This method tries to read <code>numChars</code>
+ /// characters into the <see cref="CharacterBuffer"/>, each call to fill will start
/// filling the buffer from offset <code>0</code> up to <code>numChars</code>.
/// In case code points can span across 2 java characters, this method may
/// only fill <code>numChars - 1</code> characters in order not to split in
/// the middle of a surrogate pair, even if there are remaining characters in
- /// the <seealso cref="Reader"/>.
+ /// the <see cref="Reader"/>.
/// <para>
- /// Depending on the <seealso cref="LuceneVersion"/> passed to
- /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method implements
+ /// Depending on the <see cref="LuceneVersion"/> passed to
+ /// <see cref="CharacterUtils#getInstance(Version)"/> this method implements
/// supplementary character awareness when filling the given buffer. For all
- /// <seealso cref="LuceneVersion"/> > 3.0 <seealso cref="#fill(CharacterBuffer, Reader, int)"/> guarantees
- /// that the given <seealso cref="CharacterBuffer"/> will never contain a high surrogate
+ /// <see cref="LuceneVersion"/> > 3.0 <see cref="#fill(CharacterBuffer, Reader, int)"/> guarantees
+ /// that the given <see cref="CharacterBuffer"/> will never contain a high surrogate
/// character as the last element in the buffer unless it is the last available
/// character in the reader. In other words, high and low surrogate pairs will
/// always be preserved across buffer boarders.
@@ -232,7 +232,7 @@ namespace Lucene.Net.Analysis.Util
/// the number of chars to read </param>
/// <returns> <code>false</code> if and only if reader.read returned -1 while trying to fill the buffer </returns>
/// <exception cref="IOException">
- /// if the reader throws an <seealso cref="IOException"/>. </exception>
+ /// if the reader throws an <see cref="IOException"/>. </exception>
public abstract bool Fill(CharacterBuffer buffer, Reader reader, int numChars);
/// <summary>
@@ -384,7 +384,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// A simple IO buffer to use with
- /// <seealso cref="CharacterUtils#fill(CharacterBuffer, Reader)"/>.
+ /// <see cref="CharacterUtils#fill(CharacterBuffer, Reader)"/>.
/// </summary>
public sealed class CharacterBuffer
{
@@ -431,7 +431,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Return the length of the data in the internal buffer starting at
- /// <seealso cref="#getOffset()"/>
+ /// <see cref="Offset"/>
/// </summary>
/// <returns> the length </returns>
public int Length
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
index 329731f..aa425c7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
@@ -23,8 +23,8 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Simple <seealso cref="ResourceLoader"/> that uses <seealso cref="ClassLoader#getResourceAsStream(String)"/>
- /// and <seealso cref="Class#forName(String,boolean,ClassLoader)"/> to open resources and
+ /// Simple <see cref="ResourceLoader"/> that uses <see cref="ClassLoader#getResourceAsStream(String)"/>
+ /// and <see cref="Class#forName(String,boolean,ClassLoader)"/> to open resources and
/// classes, respectively.
/// </summary>
public sealed class ClasspathResourceLoader : IResourceLoader