You are viewing a plain text version of this content. The canonical link for it is here. [Note: the hyperlink itself was lost in the plain-text rendering of this archive page.]
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/03 04:41:51 UTC
[04/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - &lt;pre class="prettyprint"&gt; to &lt;code&gt;,
&lt;seealso cref=.../&gt; to &lt;see cref=.../&gt;, Analyzer.T[...]
[Note: the original subject line contained literal angle-bracket tags that were
stripped by this plain-text rendering; reconstructed here from the replacements
visible in the diff body below. The subject is also truncated after "Analyzer.T".]
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
index 2eba697..1538470 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Sinks
/// It is also useful for doing things like entity extraction or proper noun analysis as
/// part of the analysis workflow and saving off those tokens for use in another field.
///
- /// <pre class="prettyprint">
+ /// <code>
/// TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(version, reader1));
/// TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
/// TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
@@ -47,23 +47,23 @@ namespace Lucene.Net.Analysis.Sinks
/// d.add(new TextField("f2", final2, Field.Store.NO));
/// d.add(new TextField("f3", final3, Field.Store.NO));
/// d.add(new TextField("f4", final4, Field.Store.NO));
- /// </pre>
+ /// </code>
/// In this example, <code>sink1</code> and <code>sink2</code> will both get tokens from both
/// <code>reader1</code> and <code>reader2</code> after whitespace tokenizer
/// and now we can further wrap any of these in extra analysis, and more "sources" can be inserted if desired.
/// It is important, that tees are consumed before sinks (in the above example, the field names must be
/// less the sink's field names). If you are not sure, which stream is consumed first, you can simply
- /// add another sink and then pass all tokens to the sinks at once using <seealso cref="#consumeAllTokens"/>.
+ /// add another sink and then pass all tokens to the sinks at once using <see cref="#consumeAllTokens"/>.
/// This TokenFilter is exhausted after this. In the above example, change
/// the example above to:
- /// <pre class="prettyprint">
+ /// <code>
/// ...
/// TokenStream final1 = new LowerCaseFilter(version, source1.newSinkTokenStream());
/// TokenStream final2 = source2.newSinkTokenStream();
/// sink1.consumeAllTokens();
/// sink2.consumeAllTokens();
/// ...
- /// </pre>
+ /// </code>
/// In this case, the fields can be added in any order, because the sources are not used anymore and all sinks are ready.
/// <para>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
/// </para>
@@ -81,7 +81,7 @@ namespace Lucene.Net.Analysis.Sinks
}
/// <summary>
- /// Returns a new <seealso cref="SinkTokenStream"/> that receives all tokens consumed by this stream.
+ /// Returns a new <see cref="SinkTokenStream"/> that receives all tokens consumed by this stream.
/// </summary>
public SinkTokenStream NewSinkTokenStream()
{
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Sinks
}
/// <summary>
- /// Returns a new <seealso cref="SinkTokenStream"/> that receives all tokens consumed by this stream
+ /// Returns a new <see cref="SinkTokenStream"/> that receives all tokens consumed by this stream
/// that pass the supplied filter. </summary>
/// <seealso cref= SinkFilter></seealso>
public SinkTokenStream NewSinkTokenStream(SinkFilter filter)
@@ -100,7 +100,7 @@ namespace Lucene.Net.Analysis.Sinks
}
/// <summary>
- /// Adds a <seealso cref="SinkTokenStream"/> created by another <code>TeeSinkTokenFilter</code>
+ /// Adds a <see cref="SinkTokenStream"/> created by another <code>TeeSinkTokenFilter</code>
/// to this one. The supplied stream will also receive all consumed tokens.
/// This method can be used to pass tokens from two different tees to one sink.
/// </summary>
@@ -174,18 +174,18 @@ namespace Lucene.Net.Analysis.Sinks
}
/// <summary>
- /// A filter that decides which <seealso cref="AttributeSource"/> states to store in the sink.
+ /// A filter that decides which <see cref="AttributeSource"/> states to store in the sink.
/// </summary>
public abstract class SinkFilter
{
/// <summary>
- /// Returns true, iff the current state of the passed-in <seealso cref="AttributeSource"/> shall be stored
+ /// Returns true, iff the current state of the passed-in <see cref="AttributeSource"/> shall be stored
/// in the sink.
/// </summary>
public abstract bool Accept(AttributeSource source);
/// <summary>
- /// Called by <seealso cref="SinkTokenStream#reset()"/>. This method does nothing by default
+ /// Called by <see cref="SinkTokenStream#reset()"/>. This method does nothing by default
/// and can optionally be overridden.
/// </summary>
public virtual void Reset()
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
index 4a4786d..352616a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
@@ -27,17 +27,17 @@ namespace Lucene.Net.Analysis.Snowball
*/
/// <summary>
- /// Filters <seealso cref="StandardTokenizer"/> with <seealso cref="StandardFilter"/>, {@link
- /// LowerCaseFilter}, <seealso cref="StopFilter"/> and <seealso cref="SnowballFilter"/>.
+ /// Filters <see cref="StandardTokenizer"/> with <see cref="StandardFilter"/>, {@link
+ /// LowerCaseFilter}, <see cref="StopFilter"/> and <see cref="SnowballFilter"/>.
///
/// Available stemmers are listed in org.tartarus.snowball.ext. The name of a
/// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
- /// <seealso cref="org.tartarus.snowball.ext.EnglishStemmer"/> is named "English".
+ /// <see cref="org.tartarus.snowball.ext.EnglishStemmer"/> is named "English".
///
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>, with the following addition:
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>, with the following addition:
/// <ul>
- /// <li> As of 3.1, uses <seealso cref="TurkishLowerCaseFilter"/> for Turkish language.
+ /// <li> As of 3.1, uses <see cref="TurkishLowerCaseFilter"/> for Turkish language.
/// </ul>
/// </para> </summary>
/// @deprecated (3.1) Use the language-specific analyzer in modules/analysis instead.
@@ -65,9 +65,9 @@ namespace Lucene.Net.Analysis.Snowball
}
/// <summary>
- /// Constructs a <seealso cref="StandardTokenizer"/> filtered by a {@link
- /// StandardFilter}, a <seealso cref="LowerCaseFilter"/>, a <seealso cref="StopFilter"/>,
- /// and a <seealso cref="SnowballFilter"/>
+ /// Constructs a <see cref="StandardTokenizer"/> filtered by a {@link
+ /// StandardFilter}, a <see cref="LowerCaseFilter"/>, a <see cref="StopFilter"/>,
+ /// and a <see cref="SnowballFilter"/>
/// </summary>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
index ca33892..103bd66 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
@@ -25,22 +25,22 @@ namespace Lucene.Net.Analysis.Snowball
/// <summary>
/// A filter that stems words using a Snowball-generated stemmer.
///
- /// Available stemmers are listed in <seealso cref="org.tartarus.snowball.ext"/>.
+ /// Available stemmers are listed in <see cref="org.tartarus.snowball.ext"/>.
/// <para><b>NOTE</b>: SnowballFilter expects lowercased text.
/// <ul>
- /// <li>For the Turkish language, see <seealso cref="TurkishLowerCaseFilter"/>.
- /// <li>For other languages, see <seealso cref="LowerCaseFilter"/>.
+ /// <li>For the Turkish language, see <see cref="TurkishLowerCaseFilter"/>.
+ /// <li>For other languages, see <see cref="LowerCaseFilter"/>.
/// </ul>
/// </para>
///
/// <para>
- /// Note: This filter is aware of the <seealso cref="KeywordAttribute"/>. To prevent
+ /// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
/// certain terms from being passed to the stemmer
- /// <seealso cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
- /// in a previous <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
+ /// in a previous <see cref="TokenStream"/>.
///
/// Note: For including the original term as well as the stemmed version, see
- /// <seealso cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+ /// <see cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
/// </para>
///
///
@@ -64,9 +64,9 @@ namespace Lucene.Net.Analysis.Snowball
/// <summary>
/// Construct the named stemming filter.
///
- /// Available stemmers are listed in <seealso cref="org.tartarus.snowball.ext"/>.
+ /// Available stemmers are listed in <see cref="org.tartarus.snowball.ext"/>.
/// The name of a stemmer is the part of the class name before "Stemmer",
- /// e.g., the stemmer in <seealso cref="org.tartarus.snowball.ext.EnglishStemmer"/> is named "English".
+ /// e.g., the stemmer in <see cref="org.tartarus.snowball.ext.EnglishStemmer"/> is named "English".
/// </summary>
/// <param name="in"> the input tokens to stem </param>
/// <param name="name"> the name of a stemmer </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs
index 19c199e..707467f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballPorterFilterFactory.cs
@@ -25,17 +25,17 @@ namespace Lucene.Net.Analysis.Snowball
*/
/// <summary>
- /// Factory for <seealso cref="SnowballFilter"/>, with configurable language
+ /// Factory for <see cref="SnowballFilter"/>, with configurable language
/// <para>
/// Note: Use of the "Lovins" stemmer is not recommended, as it is implemented with reflection.
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="text_snowballstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.SnowballPorterFilterFactory" protected="protectedkeyword.txt" language="English"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </para>
/// </summary>
public class SnowballPorterFilterFactory : TokenFilterFactory, IResourceLoaderAware
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
index 7305f56..70aa887 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Filters <seealso cref="ClassicTokenizer"/> with <seealso cref="ClassicFilter"/>, {@link
- /// LowerCaseFilter} and <seealso cref="StopFilter"/>, using a list of
+ /// Filters <see cref="ClassicTokenizer"/> with <see cref="ClassicFilter"/>, {@link
+ /// LowerCaseFilter} and <see cref="StopFilter"/>, using a list of
/// English stop words.
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="LuceneVersion"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating ClassicAnalyzer:
/// <ul>
/// <li> As of 3.1, StopFilter correctly handles Unicode 4.0
@@ -40,7 +40,7 @@ namespace Lucene.Net.Analysis.Standard
/// </ul>
///
/// ClassicAnalyzer was named StandardAnalyzer in Lucene versions prior to 3.1.
- /// As of 3.1, <seealso cref="StandardAnalyzer"/> implements Unicode text segmentation,
+ /// As of 3.1, <see cref="StandardAnalyzer"/> implements Unicode text segmentation,
/// as specified by UAX#29.
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs
index 297c406..c9bd8cf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilter.cs
@@ -19,7 +19,7 @@ namespace Lucene.Net.Analysis.Standard
* limitations under the License.
*/
/// <summary>
- /// Normalizes tokens extracted with <seealso cref="ClassicTokenizer"/>. </summary>
+ /// Normalizes tokens extracted with <see cref="ClassicTokenizer"/>. </summary>
public class ClassicFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs
index d310f68..80fac18 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="ClassicFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ClassicFilter"/>.
+ /// <code>
/// <fieldType name="text_clssc" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.ClassicTokenizerFactory"/>
/// <filter class="solr.ClassicFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ClassicFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
index ae4af96..415bdb7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
@@ -40,7 +40,7 @@ namespace Lucene.Net.Analysis.Standard
/// directory to your project and maintaining your own grammar-based tokenizer.
///
/// ClassicTokenizer was named StandardTokenizer in Lucene versions prior to 3.1.
- /// As of 3.1, <seealso cref="StandardTokenizer"/> implements Unicode text segmentation,
+ /// As of 3.1, <see cref="StandardTokenizer"/> implements Unicode text segmentation,
/// as specified by UAX#29.
/// </para>
/// </summary>
@@ -92,7 +92,7 @@ namespace Lucene.Net.Analysis.Standard
/// <summary>
- /// Creates a new instance of the <seealso cref="ClassicTokenizer"/>. Attaches
+ /// Creates a new instance of the <see cref="ClassicTokenizer"/>. Attaches
/// the <code>input</code> to the newly created JFlex scanner.
/// </summary>
/// <param name="input"> The input reader
@@ -105,7 +105,7 @@ namespace Lucene.Net.Analysis.Standard
}
/// <summary>
- /// Creates a new ClassicTokenizer with a given <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
+ /// Creates a new ClassicTokenizer with a given <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
/// </summary>
public ClassicTokenizer(LuceneVersion matchVersion, AttributeFactory factory, Reader input)
: base(factory, input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs
index 4f895ab..079e824 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizerFactory.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="ClassicTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ClassicTokenizer"/>.
+ /// <code>
/// <fieldType name="text_clssc" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.ClassicTokenizerFactory" maxTokenLength="120"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ClassicTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
index 5a31f16..5770b55 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Filters <seealso cref="StandardTokenizer"/> with <seealso cref="StandardFilter"/>, {@link
- /// LowerCaseFilter} and <seealso cref="StopFilter"/>, using a list of
+ /// Filters <see cref="StandardTokenizer"/> with <see cref="StandardFilter"/>, {@link
+ /// LowerCaseFilter} and <see cref="StopFilter"/>, using a list of
/// English stop words.
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="LuceneVersion"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating StandardAnalyzer:
/// <ul>
/// <li> As of 3.4, Hiragana and Han characters are no longer wrongly split
@@ -36,7 +36,7 @@ namespace Lucene.Net.Analysis.Standard
/// you get the exact broken behavior for backwards compatibility.
/// <li> As of 3.1, StandardTokenizer implements Unicode text segmentation,
/// and StopFilter correctly handles Unicode 4.0 supplementary characters
- /// in stopwords. <seealso cref="ClassicTokenizer"/> and <seealso cref="ClassicAnalyzer"/>
+ /// in stopwords. <see cref="ClassicTokenizer"/> and <see cref="ClassicAnalyzer"/>
/// are the pre-3.1 implementations of StandardTokenizer and
/// StandardAnalyzer.
/// <li> As of 2.9, StopFilter preserves position increments
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs
index 5513468..0c78b06 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilter.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Normalizes tokens extracted with <seealso cref="StandardTokenizer"/>.
+ /// Normalizes tokens extracted with <see cref="StandardTokenizer"/>.
/// </summary>
public class StandardFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs
index d968bc4..5476920 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="StandardFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="StandardFilter"/>.
+ /// <code>
/// <fieldType name="text_stndrd" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.StandardFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class StandardFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
index 37f2ac7..deae880 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Standard
///
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating StandardTokenizer:
/// <ul>
/// <li> As of 3.4, Hiragana and Han characters are no longer wrongly split
@@ -49,7 +49,7 @@ namespace Lucene.Net.Analysis.Standard
/// you get the exact broken behavior for backwards compatibility.
/// <li> As of 3.1, StandardTokenizer implements Unicode text segmentation.
/// If you use a previous version number, you get the exact behavior of
- /// <seealso cref="ClassicTokenizer"/> for backwards compatibility.
+ /// <see cref="ClassicTokenizer"/> for backwards compatibility.
/// </ul>
/// </para>
/// </summary>
@@ -119,7 +119,7 @@ namespace Lucene.Net.Analysis.Standard
/// <summary>
- /// Creates a new instance of the <seealso cref="StandardTokenizer"/>. Attaches
+ /// Creates a new instance of the <see cref="StandardTokenizer"/>. Attaches
/// the <code>input</code> to the newly created JFlex scanner.
/// </summary>
/// <param name="input"> The input reader
@@ -132,7 +132,7 @@ namespace Lucene.Net.Analysis.Standard
}
/// <summary>
- /// Creates a new StandardTokenizer with a given <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
+ /// Creates a new StandardTokenizer with a given <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
/// </summary>
public StandardTokenizer(Version matchVersion, AttributeFactory factory, Reader input)
: base(factory, input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs
index f933316..cbc5915 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerFactory.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="StandardTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="StandardTokenizer"/>.
+ /// <code>
/// <fieldType name="text_stndrd" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory" maxTokenLength="255"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class StandardTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs
index d15a349..c250996 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerInterface.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Standard
/// Resumes scanning until the next regular expression is matched,
/// the end of input is encountered or an I/O-Error occurs.
/// </summary>
- /// <returns> the next token, <seealso cref="#YYEOF"/> on end of stream </returns>
+ /// <returns> the next token, <see cref="#YYEOF"/> on end of stream </returns>
/// <exception cref="IOException"> if any I/O-Error occurs </exception>
int GetNextToken();
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs
index 9ff6b74..502b98c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailAnalyzer.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Filters <seealso cref="org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer"/>
- /// with <seealso cref="StandardFilter"/>,
- /// <seealso cref="LowerCaseFilter"/> and
- /// <seealso cref="StopFilter"/>, using a list of
+ /// Filters <see cref="org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer"/>
+ /// with <see cref="StandardFilter"/>,
+ /// <see cref="LowerCaseFilter"/> and
+ /// <see cref="StopFilter"/>, using a list of
/// English stop words.
///
/// <a name="version"/>
/// <para>
- /// You must specify the required <seealso cref="org.apache.lucene.util.Version"/>
+ /// You must specify the required <see cref="org.apache.lucene.util.Version"/>
/// compatibility when creating UAX29URLEmailAnalyzer
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
index 0821678..2c91236 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
@@ -44,7 +44,7 @@ namespace Lucene.Net.Analysis.Standard
/// <li><HIRAGANA>: A single hiragana character</li>
/// </ul>
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating UAX29URLEmailTokenizer:
/// <ul>
/// <li> As of 3.4, Hiragana and Han characters are no longer wrongly split
@@ -110,7 +110,7 @@ namespace Lucene.Net.Analysis.Standard
}
/// <summary>
- /// Creates a new UAX29URLEmailTokenizer with a given <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
+ /// Creates a new UAX29URLEmailTokenizer with a given <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>
/// </summary>
public UAX29URLEmailTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader input)
: base(factory, input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs
index 9375c3a..dc902f8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerFactory.cs
@@ -24,13 +24,13 @@ namespace Lucene.Net.Analysis.Standard
*/
/// <summary>
- /// Factory for <seealso cref="UAX29URLEmailTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="UAX29URLEmailTokenizer"/>.
+ /// <code>
/// <fieldType name="text_urlemail" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.UAX29URLEmailTokenizerFactory" maxTokenLength="255"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class UAX29URLEmailTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
index c1fefe3..7a334a7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Sv
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Swedish.
+ /// <see cref="Analyzer"/> for Swedish.
/// </summary>
public sealed class SwedishAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Sv
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public SwedishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Sv
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Sv
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs
index 32bb191..356469e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.Sv
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="SwedishLightStemmer"/> to stem Swedish
+ /// A <see cref="TokenFilter"/> that applies <see cref="SwedishLightStemmer"/> to stem Swedish
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class SwedishLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs
index 213154a..11ca0ab 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Sv
*/
/// <summary>
- /// Factory for <seealso cref="SwedishLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="SwedishLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.SwedishLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class SwedishLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs
index 6aaa25f..2b09a51 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/FSTSynonymFilterFactory.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Synonym
private SynonymMap map;
- [Obsolete(@"(3.4) use <seealso cref=""SynonymFilterFactory"" instead. this is only a backwards compatibility")]
+ [Obsolete(@"(3.4) use SynonymFilterFactory instead. this is only a backwards compatibility")]
public FSTSynonymFilterFactory(IDictionary<string, string> args)
: base(args)
{
@@ -122,7 +122,7 @@ namespace Lucene.Net.Analysis.Synonym
}
/// <summary>
- /// Load synonyms with the given <seealso cref="SynonymMap.Parser"/> class.
+ /// Load synonyms with the given <see cref="SynonymMap.Parser"/> class.
/// </summary>
private SynonymMap LoadSynonyms(IResourceLoader loader, string cname, bool dedup, Analyzer analyzer)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs
index 0a8ee7e..b9b7eb0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilter.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Synonym
/// Generated synonyms will start at the same position as the first matched source token.
/// </para>
/// </summary>
- /// @deprecated (3.4) use <seealso cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
+ /// @deprecated (3.4) use <see cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
[Obsolete("(3.4) use <seealso cref=\"SynonymFilterFactory\"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0")]
internal sealed class SlowSynonymFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs
index f53f978..7d51320 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymFilterFactory.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Synonym
*/
/// <summary>
- /// Factory for <seealso cref="SlowSynonymFilter"/> (only used with luceneMatchVersion < 3.4)
+ /// Factory for <see cref="SlowSynonymFilter"/> (only used with luceneMatchVersion < 3.4)
- /// <pre class="prettyprint" >
+ /// <code>
/// <fieldType name="text_synonym" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
@@ -38,8 +38,8 @@ namespace Lucene.Net.Analysis.Synonym
/// <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="false"
/// expand="true" tokenizerFactory="solr.WhitespaceTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre> </summary>
- /// @deprecated (3.4) use <seealso cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
+ /// </fieldType></code> </summary>
+ /// @deprecated (3.4) use <see cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
[Obsolete("(3.4) use <seealso cref=\"SynonymFilterFactory\"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0")]
internal sealed class SlowSynonymFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs
index 5f3cff9..178618c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SlowSynonymMap.cs
@@ -27,8 +27,8 @@ namespace Lucene.Net.Analysis.Synonym
*/
/// <summary>
- /// Mapping rules for use with <seealso cref="SlowSynonymFilter"/> </summary>
- /// @deprecated (3.4) use <seealso cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
+ /// Mapping rules for use with <see cref="SlowSynonymFilter"/> </summary>
+ /// @deprecated (3.4) use <see cref="SynonymFilterFactory"/> instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0
[Obsolete("(3.4) use SynonymFilterFactory instead. only for precise index backwards compatibility. this factory will be removed in Lucene 5.0")]
internal class SlowSynonymMap
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
index 6be6319..112c7fa 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilter.cs
@@ -40,7 +40,7 @@ namespace Lucene.Net.Analysis.Synonym
/// a -> x
/// a b -> y
/// b c d -> z
- /// </pre>
+ /// </code>
///
/// Then input <code>a b c d e</code> parses to <code>y b c
/// d</code>, ie the 2nd rule "wins" because it started
@@ -265,9 +265,9 @@ namespace Lucene.Net.Analysis.Synonym
/// <param name="input"> input tokenstream </param>
/// <param name="synonyms"> synonym map </param>
- /// <param name="ignoreCase"> case-folds input for matching with <seealso cref="Character#toLowerCase(int)"/>.
+ /// <param name="ignoreCase"> case-folds input for matching with <see cref="Character#toLowerCase(int)"/>.
/// Note, if you set this to true, its your responsibility to lowercase
- /// the input entries when you create the <seealso cref="SynonymMap"/> </param>
+ /// the input entries when you create the <see cref="SynonymMap"/> </param>
public SynonymFilter(TokenStream input, SynonymMap synonyms, bool ignoreCase) : base(input)
{
termAtt = AddAttribute<ICharTermAttribute>();
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
index ce209bb..4c05334 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Synonym
*/
/// <summary>
- /// Factory for <seealso cref="SynonymFilter"/>.
+ /// Factory for <see cref="SynonymFilter"/>.
- /// <pre class="prettyprint" >
+ /// <code>
/// <fieldType name="text_synonym" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Synonym
/// tokenizerFactory="solr.WhitespaceTokenizerFactory"
/// [optional tokenizer factory parameters]/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
///
/// <para>
/// An optional param name prefix of "tokenizerFactory." may be used for any
@@ -44,14 +44,14 @@ namespace Lucene.Net.Analysis.Synonym
/// </para>
/// <para>
/// The optional {@code format} parameter controls how the synonyms will be parsed:
- /// It supports the short names of {@code solr} for <seealso cref="SolrSynonymParser"/>
- /// and {@code wordnet} for and <seealso cref="WordnetSynonymParser"/>, or your own
+ /// It supports the short names of {@code solr} for <see cref="SolrSynonymParser"/>
+ /// and {@code wordnet} for <see cref="WordnetSynonymParser"/>, or your own
/// {@code SynonymMap.Parser} class name. The default is {@code solr}.
- /// A custom <seealso cref="SynonymMap.Parser"/> is expected to have a constructor taking:
+ /// A custom <see cref="SynonymMap.Parser"/> is expected to have a constructor taking:
/// <ul>
/// <li><code>boolean dedup</code> - true if duplicates should be ignored, false otherwise</li>
/// <li><code>boolean expand</code> - true if conflation groups should be expanded, false if they are one-directional</li>
- /// <li><code><seealso cref="Analyzer"/> analyzer</code> - an analyzer used for each raw synonym</li>
+ /// <li><code><see cref="Analyzer"/> analyzer</code> - an analyzer used for each raw synonym</li>
/// </ul>
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
index e471cc3..ca9e038 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymMap.cs
@@ -241,7 +241,7 @@ namespace Lucene.Net.Analysis.Synonym
}
/// <summary>
- /// Builds an <seealso cref="SynonymMap"/> and returns it.
+ /// Builds an <see cref="SynonymMap"/> and returns it.
/// </summary>
public virtual SynonymMap Build()
{
@@ -349,13 +349,13 @@ namespace Lucene.Net.Analysis.Synonym
}
/// <summary>
- /// Parse the given input, adding synonyms to the inherited <seealso cref="Builder"/>. </summary>
+ /// Parse the given input, adding synonyms to the inherited <see cref="Builder"/>. </summary>
/// <param name="in"> The input to parse </param>
public abstract void Parse(TextReader @in);
/// <summary>
/// Sugar: analyzes the text with the analyzer and
- /// separates by <seealso cref="SynonymMap#WORD_SEPARATOR"/>.
+ /// separates by <see cref="SynonymMap.WORD_SEPARATOR"/>.
/// reuse and its chars must not be null.
/// </summary>
public virtual CharsRef Analyze(string text, CharsRef reuse)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
index 7569796..f143f90 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Thai language. It uses <seealso cref="java.text.BreakIterator"/> to break words.
+ /// <see cref="Analyzer"/> for Thai language. It uses <see cref="java.text.BreakIterator"/> to break words.
/// <para>
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating ThaiAnalyzer:
/// <ul>
/// <li> As of 3.6, a set of Thai stopwords is used by default
@@ -108,13 +108,13 @@ namespace Lucene.Net.Analysis.Th
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="ThaiWordFilter"/>, and
- /// <seealso cref="StopFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="ThaiWordFilter"/>, and
+ /// <see cref="StopFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
if (m_matchVersion.OnOrAfter(LuceneVersion.LUCENE_48))
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs
index ae3ab1a..21287f2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// Tokenizer that use <seealso cref="BreakIterator"/> to tokenize Thai text.
+ /// Tokenizer that use <see cref="BreakIterator"/> to tokenize Thai text.
/// <para>WARNING: this tokenizer may not be supported by all JREs.
/// It is known to work with Sun/Oracle and Harmony JREs.
/// If your application needs to be fully portable, consider using ICUTokenizer instead,
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs
index 8dc16a8..67a1388 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiTokenizerFactory.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// Factory for <seealso cref="ThaiTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ThaiTokenizer"/>.
+ /// <code>
/// <fieldType name="text_thai" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.ThaiTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ThaiTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs
index d55733a..8387639 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilter.cs
@@ -28,10 +28,10 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// <seealso cref="TokenFilter"/> that use <seealso cref="java.text.BreakIterator"/> to break each
+ /// <see cref="TokenFilter"/> that use <see cref="java.text.BreakIterator"/> to break each
/// Token that is Thai into separate Token(s) for each Thai word.
/// <para>Please note: Since matchVersion 3.1 on, this filter no longer lowercases non-thai text.
- /// <seealso cref="ThaiAnalyzer"/> will insert a <seealso cref="LowerCaseFilter"/> before this filter
+ /// <see cref="ThaiAnalyzer"/> will insert a <see cref="LowerCaseFilter"/> before this filter
/// so the behaviour of the Analyzer does not change. With version 3.1, the filter handles
/// position increments correctly.
/// </para>
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Th
/// which uses an ICU Thai BreakIterator that will always be available.
/// </para>
/// </summary>
- /// @deprecated Use <seealso cref="ThaiTokenizer"/> instead.
+ /// @deprecated Use <see cref="ThaiTokenizer"/> instead.
[Obsolete("Use ThaiTokenizer instead.")]
public sealed class ThaiWordFilter : TokenFilter
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs
index 18d19b8..6b289f9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiWordFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Th
*/
/// <summary>
- /// Factory for <seealso cref="ThaiWordFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ThaiWordFilter"/>.
+ /// <code>
/// <fieldType name="text_thai" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.ThaiWordFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre> </summary>
- /// @deprecated Use <seealso cref="ThaiTokenizerFactory"/> instead
+ /// </fieldType></code> </summary>
+ /// @deprecated Use <see cref="ThaiTokenizerFactory"/> instead
[Obsolete("Use ThaiTokenizerFactory instead")]
public class ThaiWordFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs
index a4cd69b..198c382 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Tr/ApostropheFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Tr
*/
/// <summary>
- /// Factory for <seealso cref="ApostropheFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ApostropheFilter"/>.
+ /// <code>
/// <fieldType name="text_tr_lower_apostrophes" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.ApostropheFilterFactory"/>
/// <filter class="solr.TurkishLowerCaseFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ApostropheFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
index c54966a..93f08c4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Tr
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Turkish.
+ /// <see cref="Analyzer"/> for Turkish.
/// </summary>
public sealed class TurkishAnalyzer : StopwordAnalyzerBase
{
@@ -84,7 +84,7 @@ namespace Lucene.Net.Analysis.Tr
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public TurkishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -103,7 +103,7 @@ namespace Lucene.Net.Analysis.Tr
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -116,15 +116,15 @@ namespace Lucene.Net.Analysis.Tr
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="TurkishLowerCaseFilter"/>,
- /// <seealso cref="StopFilter"/>, <seealso cref="SetKeywordMarkerFilter"/> if a stem
- /// exclusion set is provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="TurkishLowerCaseFilter"/>,
+ /// <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem
+ /// exclusion set is provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs
index cab7d5d..486b2c0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishLowerCaseFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Tr
*/
/// <summary>
- /// Factory for <seealso cref="TurkishLowerCaseFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="TurkishLowerCaseFilter"/>.
+ /// <code>
/// <fieldType name="text_trlwr" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.TurkishLowerCaseFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class TurkishLowerCaseFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
index 411924b..1eebb02 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
@@ -28,13 +28,13 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Abstract parent class for analysis factories <seealso cref="TokenizerFactory"/>,
- /// <seealso cref="TokenFilterFactory"/> and <seealso cref="CharFilterFactory"/>.
+ /// Abstract parent class for analysis factories <see cref="TokenizerFactory"/>,
+ /// <see cref="TokenFilterFactory"/> and <see cref="CharFilterFactory"/>.
/// <para>
/// The typical lifecycle for a factory consumer is:
/// <ol>
/// <li>Create factory via its constructor (or via XXXFactory.forName)</li>
- /// <li>(Optional) If the factory uses resources such as files, <seealso cref="ResourceLoaderAware#inform(ResourceLoader)"/> is called to initialize those resources.</li>
+ /// <li>(Optional) If the factory uses resources such as files, <see cref="ResourceLoaderAware#inform(ResourceLoader)"/> is called to initialize those resources.</li>
/// <li>Consumer calls create() to obtain instances.</li>
/// </ol>
/// </para>
@@ -75,9 +75,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// this method can be called in the <seealso cref="TokenizerFactory#create(java.io.Reader)"/>
- /// or <seealso cref="TokenFilterFactory#create(org.apache.lucene.analysis.TokenStream)"/> methods,
- /// to inform user, that for this factory a <seealso cref="#luceneMatchVersion"/> is required
+ /// this method can be called in the <see cref="TokenizerFactory#create(java.io.Reader)"/>
+ /// or <see cref="TokenFilterFactory#create(org.apache.lucene.analysis.TokenStream)"/> methods,
+ /// to inform user, that for this factory a <see cref="#luceneMatchVersion"/> is required
/// </summary>
protected internal void AssureMatchVersion()
{
@@ -299,7 +299,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Returns as <seealso cref="CharArraySet"/> from wordFiles, which
+ /// Returns as <see cref="CharArraySet"/> from wordFiles, which
/// can be a comma-separated list of filenames
/// </summary>
protected internal CharArraySet GetWordSet(IResourceLoader loader, string wordFiles, bool ignoreCase)
@@ -330,7 +330,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// same as <seealso cref="#getWordSet(ResourceLoader, String, boolean)"/>,
+ /// same as <see cref="GetWordSet(IResourceLoader, string, bool)"/>,
/// except the input is in snowball format.
/// </summary>
protected internal CharArraySet GetSnowballWordSet(IResourceLoader loader, string wordFiles, bool ignoreCase)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs
index 03b949d..3ba2f08 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AnalysisSPILoader.cs
@@ -49,9 +49,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Reloads the internal SPI list from the given <seealso cref="ClassLoader"/>.
+ /// Reloads the internal SPI list from the given <see cref="ClassLoader"/>.
/// Changes to the service list are visible after the method ends, all
- /// iterators (<seealso cref="#iterator()"/>,...) stay consistent.
+ /// iterators (<see cref="#iterator()"/>,...) stay consistent.
///
/// <p><b>NOTE:</b> Only new service providers are added, existing ones are
/// never removed or replaced.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs
index 9905f99..e50e87e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayIterator.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// A CharacterIterator used internally for use with <seealso cref="BreakIterator"/>
+ /// A CharacterIterator used internally for use with <see cref="BreakIterator"/>
/// @lucene.internal
/// </summary>
public abstract class CharArrayIterator : CharacterIterator
@@ -170,7 +170,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Create a new CharArrayIterator that works around JRE bugs
- /// in a manner suitable for <seealso cref="BreakIterator#getSentenceInstance()"/>
+ /// in a manner suitable for <see cref="BreakIterator#getSentenceInstance()"/>
/// </summary>
public static CharArrayIterator NewSentenceInstance()
{
@@ -188,7 +188,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Create a new CharArrayIterator that works around JRE bugs
- /// in a manner suitable for <seealso cref="BreakIterator#getWordInstance()"/>
+ /// in a manner suitable for <see cref="BreakIterator#getWordInstance()"/>
/// </summary>
public static CharArrayIterator NewWordInstance()
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
index ac5edfa..5cade2d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
@@ -2017,7 +2017,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Empty <seealso cref="CharArrayMap{V}.UnmodifiableCharArrayMap"/> optimized for speed.
+ /// Empty <see cref="CharArrayMap{V}.UnmodifiableCharArrayMap"/> optimized for speed.
/// Contains checks will always return <code>false</code> or throw
/// NPE if necessary.
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
index e968afb..a6a1efe 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Abstract parent class for analysis factories that create <seealso cref="CharFilter"/>
+ /// Abstract parent class for analysis factories that create <see cref="CharFilter"/>
/// instances.
/// </summary>
public abstract class CharFilterFactory : AbstractAnalysisFactory
@@ -52,9 +52,9 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Reloads the factory list from the given <seealso cref="ClassLoader"/>.
+ /// Reloads the factory list from the given <see cref="ClassLoader"/>.
/// Changes to the factories are visible after the method ends, all
- /// iterators (<seealso cref="#availableCharFilters()"/>,...) stay consistent.
+ /// iterators (<see cref="#availableCharFilters()"/>,...) stay consistent.
///
/// <para><b>NOTE:</b> Only new factories are added, existing ones are
/// never removed or replaced.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
index daf5683..58cc255 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
@@ -26,37 +26,37 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// An abstract base class for simple, character-oriented tokenizers.
/// <para>
- /// <a name="version">You must specify the required <seealso cref="LuceneVersion"/> compatibility
- /// when creating <seealso cref="CharTokenizer"/>:
+ /// <a name="version">You must specify the required <see cref="LuceneVersion"/> compatibility
+ /// when creating <see cref="CharTokenizer"/>:
/// <ul>
- /// <li>As of 3.1, <seealso cref="CharTokenizer"/> uses an int based API to normalize and
- /// detect token codepoints. See <seealso cref="#isTokenChar(int)"/> and
- /// <seealso cref="#normalize(int)"/> for details.</li>
+ /// <li>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+ /// detect token codepoints. See <see cref="#isTokenChar(int)"/> and
+ /// <see cref="#normalize(int)"/> for details.</li>
/// </ul>
/// </para>
/// <para>
- /// A new <seealso cref="CharTokenizer"/> API has been introduced with Lucene 3.1. This API
+ /// A new <see cref="CharTokenizer"/> API has been introduced with Lucene 3.1. This API
/// moved from UTF-16 code units to UTF-32 codepoints to eventually add support
/// for <a href=
/// "http://java.sun.com/j2se/1.5.0/docs/api/java/lang/Character.html#supplementary"
/// >supplementary characters</a>. The old <i>char</i> based API has been
/// deprecated and should be replaced with the <i>int</i> based methods
- /// <seealso cref="#isTokenChar(int)"/> and <seealso cref="#normalize(int)"/>.
+ /// <see cref="#isTokenChar(int)"/> and <see cref="#normalize(int)"/>.
/// </para>
/// <para>
- /// As of Lucene 3.1 each <seealso cref="CharTokenizer"/> - constructor expects a
- /// <seealso cref="LuceneVersion"/> argument. Based on the given <seealso cref="LuceneVersion"/> either the new
+ /// As of Lucene 3.1 each <see cref="CharTokenizer"/> - constructor expects a
+ /// <see cref="LuceneVersion"/> argument. Based on the given <see cref="LuceneVersion"/> either the new
/// API or a backwards compatibility layer is used at runtime. For
- /// <seealso cref="LuceneVersion"/> < 3.1 the backwards compatibility layer ensures correct
+ /// <see cref="LuceneVersion"/> < 3.1 the backwards compatibility layer ensures correct
/// behavior even for indexes build with previous versions of Lucene. If a
- /// <seealso cref="LuceneVersion"/> >= 3.1 is used <seealso cref="CharTokenizer"/> requires the new API to
+ /// <see cref="LuceneVersion"/> >= 3.1 is used <see cref="CharTokenizer"/> requires the new API to
/// be implemented by the instantiated class. Yet, the old <i>char</i> based API
/// is not required anymore even if backwards compatibility must be preserved.
- /// <seealso cref="CharTokenizer"/> subclasses implementing the new API are fully backwards
- /// compatible if instantiated with <seealso cref="LuceneVersion"/> < 3.1.
+ /// <see cref="CharTokenizer"/> subclasses implementing the new API are fully backwards
+ /// compatible if instantiated with <see cref="LuceneVersion"/> < 3.1.
/// </para>
/// <para>
- /// <strong>Note:</strong> If you use a subclass of <seealso cref="CharTokenizer"/> with <seealso cref="LuceneVersion"/> >=
+ /// <strong>Note:</strong> If you use a subclass of <see cref="CharTokenizer"/> with <see cref="LuceneVersion"/> >=
/// 3.1 on an index build with a version < 3.1, created tokens might not be
/// compatible with the terms in your index.
/// </para>
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.Util
public abstract class CharTokenizer : Tokenizer
{
/// <summary>
- /// Creates a new <seealso cref="CharTokenizer"/> instance
+ /// Creates a new <see cref="CharTokenizer"/> instance
/// </summary>
/// <param name="matchVersion">
/// Lucene version to match </param>
@@ -78,12 +78,12 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Creates a new <seealso cref="CharTokenizer"/> instance
+ /// Creates a new <see cref="CharTokenizer"/> instance
/// </summary>
/// <param name="matchVersion">
/// Lucene version to match </param>
/// <param name="factory">
- /// the attribute factory to use for this <seealso cref="Tokenizer"/> </param>
+ /// the attribute factory to use for this <see cref="Tokenizer"/> </param>
/// <param name="input">
/// the input to split up into tokens </param>
protected CharTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
index c4475a5..3d8801d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharacterUtils.cs
@@ -25,9 +25,9 @@ namespace Lucene.Net.Analysis.Util
* limitations under the License.
*/
/// <summary>
- /// <seealso cref="CharacterUtils"/> provides a unified interface to Character-related
+ /// <see cref="CharacterUtils"/> provides a unified interface to Character-related
/// operations to implement backwards compatible character operations based on a
- /// <seealso cref="LuceneVersion"/> instance.
+ /// <see cref="LuceneVersion"/> instance.
///
/// @lucene.internal
/// </summary>
@@ -37,13 +37,13 @@ namespace Lucene.Net.Analysis.Util
private static readonly CharacterUtils JAVA_5 = new Java5CharacterUtils();
/// <summary>
- /// Returns a <seealso cref="CharacterUtils"/> implementation according to the given
- /// <seealso cref="LuceneVersion"/> instance.
+ /// Returns a <see cref="CharacterUtils"/> implementation according to the given
+ /// <see cref="LuceneVersion"/> instance.
/// </summary>
/// <param name="matchVersion">
/// a version instance </param>
- /// <returns> a <seealso cref="CharacterUtils"/> implementation according to the given
- /// <seealso cref="LuceneVersion"/> instance. </returns>
+ /// <returns> a <see cref="CharacterUtils"/> implementation according to the given
+ /// <see cref="LuceneVersion"/> instance. </returns>
public static CharacterUtils GetInstance(LuceneVersion matchVersion)
{
#pragma warning disable 612, 618
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Return a <seealso cref="CharacterUtils"/> instance compatible with Java 1.4. </summary>
+ /// Return a <see cref="CharacterUtils"/> instance compatible with Java 1.4. </summary>
public static CharacterUtils Java4Instance
{
get
@@ -62,10 +62,10 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Returns the code point at the given index of the <seealso cref="CharSequence"/>.
- /// Depending on the <seealso cref="LuceneVersion"/> passed to
- /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
- /// of <seealso cref="Character#codePointAt(char[], int)"/> as it would have been
+ /// Returns the code point at the given index of the <see cref="CharSequence"/>.
+ /// Depending on the <see cref="LuceneVersion"/> passed to
+ /// <see cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
+ /// of <see cref="Character#codePointAt(char[], int)"/> as it would have been
/// available on a Java 1.4 JVM or on a later virtual machine version.
/// </summary>
/// <param name="seq">
@@ -85,9 +85,9 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Returns the code point at the given index of the char array where only elements
/// with index less than the limit are used.
- /// Depending on the <seealso cref="LuceneVersion"/> passed to
- /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
- /// of <seealso cref="Character#codePointAt(char[], int)"/> as it would have been
+ /// Depending on the <see cref="LuceneVersion"/> passed to
+ /// <see cref="CharacterUtils#getInstance(Version)"/> this method mimics the behavior
+ /// of <see cref="Character#codePointAt(char[], int)"/> as it would have been
/// available on a Java 1.4 JVM or on a later virtual machine version.
/// </summary>
/// <param name="chars">
@@ -110,12 +110,12 @@ namespace Lucene.Net.Analysis.Util
public abstract int CodePointCount(string seq);
/// <summary>
- /// Creates a new <seealso cref="CharacterBuffer"/> and allocates a <code>char[]</code>
+ /// Creates a new <see cref="CharacterBuffer"/> and allocates a <code>char[]</code>
/// of the given bufferSize.
/// </summary>
/// <param name="bufferSize">
/// the internal char buffer size, must be <code>>= 2</code> </param>
- /// <returns> a new <seealso cref="CharacterBuffer"/> instance. </returns>
+ /// <returns> a new <see cref="CharacterBuffer"/> instance. </returns>
public static CharacterBuffer NewCharacterBuffer(int bufferSize)
{
if (bufferSize < 2)
@@ -127,7 +127,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
- /// Converts each unicode codepoint to lowerCase via <seealso cref="Character#toLowerCase(int)"/> starting
+ /// Converts each unicode codepoint to lowerCase via <see cref="Character#toLowerCase(int)"/> starting
/// at the given offset. </summary>
/// <param name="buffer"> the char buffer to lowercase </param>
/// <param name="offset"> the offset to start at </param>
@@ -145,7 +145,7 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Converts each unicode codepoint to UpperCase via <seealso cref="Character#toUpperCase(int)"/> starting
+ /// Converts each unicode codepoint to UpperCase via <see cref="Character#toUpperCase(int)"/> starting
/// at the given offset. </summary>
/// <param name="buffer"> the char buffer to UPPERCASE </param>
/// <param name="offset"> the offset to start at </param>
@@ -200,20 +200,20 @@ namespace Lucene.Net.Analysis.Util
}
/// <summary>
- /// Fills the <seealso cref="CharacterBuffer"/> with characters read from the given
- /// reader <seealso cref="Reader"/>. This method tries to read <code>numChars</code>
- /// characters into the <seealso cref="CharacterBuffer"/>, each call to fill will start
+ /// Fills the <see cref="CharacterBuffer"/> with characters read from the given
+ /// reader <see cref="Reader"/>. This method tries to read <code>numChars</code>
+ /// characters into the <see cref="CharacterBuffer"/>, each call to fill will start
/// filling the buffer from offset <code>0</code> up to <code>numChars</code>.
/// In case code points can span across 2 java characters, this method may
/// only fill <code>numChars - 1</code> characters in order not to split in
/// the middle of a surrogate pair, even if there are remaining characters in
- /// the <seealso cref="Reader"/>.
+ /// the <see cref="Reader"/>.
/// <para>
- /// Depending on the <seealso cref="LuceneVersion"/> passed to
- /// <seealso cref="CharacterUtils#getInstance(Version)"/> this method implements
+ /// Depending on the <see cref="LuceneVersion"/> passed to
+ /// <see cref="CharacterUtils#getInstance(Version)"/> this method implements
/// supplementary character awareness when filling the given buffer. For all
- /// <seealso cref="LuceneVersion"/> > 3.0 <seealso cref="#fill(CharacterBuffer, Reader, int)"/> guarantees
- /// that the given <seealso cref="CharacterBuffer"/> will never contain a high surrogate
+ /// <see cref="LuceneVersion"/> > 3.0 <see cref="#fill(CharacterBuffer, Reader, int)"/> guarantees
+ /// that the given <see cref="CharacterBuffer"/> will never contain a high surrogate
/// character as the last element in the buffer unless it is the last available
/// character in the reader. In other words, high and low surrogate pairs will
/// always be preserved across buffer boarders.
@@ -232,7 +232,7 @@ namespace Lucene.Net.Analysis.Util
/// the number of chars to read </param>
/// <returns> <code>false</code> if and only if reader.read returned -1 while trying to fill the buffer </returns>
/// <exception cref="IOException">
- /// if the reader throws an <seealso cref="IOException"/>. </exception>
+ /// if the reader throws an <see cref="IOException"/>. </exception>
public abstract bool Fill(CharacterBuffer buffer, Reader reader, int numChars);
/// <summary>
@@ -384,7 +384,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// A simple IO buffer to use with
- /// <seealso cref="CharacterUtils#fill(CharacterBuffer, Reader)"/>.
+ /// <see cref="CharacterUtils#fill(CharacterBuffer, Reader)"/>.
/// </summary>
public sealed class CharacterBuffer
{
@@ -431,7 +431,7 @@ namespace Lucene.Net.Analysis.Util
/// <summary>
/// Return the length of the data in the internal buffer starting at
- /// <seealso cref="#getOffset()"/>
+ /// <see cref="#getOffset()"/>
/// </summary>
/// <returns> the length </returns>
public int Length
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
index 329731f..aa425c7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
@@ -23,8 +23,8 @@ namespace Lucene.Net.Analysis.Util
*/
/// <summary>
- /// Simple <seealso cref="ResourceLoader"/> that uses <seealso cref="ClassLoader#getResourceAsStream(String)"/>
- /// and <seealso cref="Class#forName(String,boolean,ClassLoader)"/> to open resources and
+ /// Simple <see cref="ResourceLoader"/> that uses <see cref="ClassLoader#getResourceAsStream(String)"/>
+ /// and <see cref="Class#forName(String,boolean,ClassLoader)"/> to open resources and
/// classes, respectively.
/// </summary>
public sealed class ClasspathResourceLoader : IResourceLoader