You are viewing a plain text version of this content; the canonical (HTML) version with working links is available in the mailing-list archive.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/03 04:41:49 UTC
[02/13] lucenenet git commit: Lucene.Net.Analysis.Common: fixes for
some documentation comments previously missed
Lucene.Net.Analysis.Common: fixes for some documentation comments previously missed
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/829f8ee7
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/829f8ee7
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/829f8ee7
Branch: refs/heads/api-work
Commit: 829f8ee75a7767bb8730fcdef1d031c90ff92d5a
Parents: 363ea8e
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 10:32:35 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 10:32:35 2017 +0700
----------------------------------------------------------------------
.../Analysis/Ar/ArabicLetterTokenizer.cs | 6 +++---
src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs | 2 +-
.../Analysis/CharFilter/NormalizeCharMap.cs | 4 ++--
.../Analysis/Compound/HyphenationCompoundWordTokenFilter.cs | 4 ++--
4 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/829f8ee7/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
index 0e4e28c..c698d5c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
@@ -47,8 +47,8 @@ namespace Lucene.Net.Analysis.Ar
{
/// <summary>
/// Construct a new ArabicLetterTokenizer. </summary>
- /// <param name="matchVersion"> Lucene version
- /// to match See <seealso cref="<a href="#version">above</a>"/>
+ /// <param name="matchVersion"> <see cref="LuceneVersion"/>
+ /// to match
/// </param>
/// <param name="in">
/// the input to split up into tokens </param>
@@ -76,7 +76,7 @@ namespace Lucene.Net.Analysis.Ar
/// <summary>
/// Allows for Letter category or NonspacingMark category </summary>
- /// <seealso cref="LetterTokenizer.IsTokenChar(int)"/>
+ /// <see cref="LetterTokenizer.IsTokenChar(int)"/>
protected override bool IsTokenChar(int c)
{
return base.IsTokenChar(c) || Character.GetType(c) == UnicodeCategory.NonSpacingMark;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/829f8ee7/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
index 704f543..a33ebb6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
@@ -125,7 +125,7 @@ namespace Lucene.Net.Analysis.Ca
/// </summary>
/// <returns> A
/// <see cref="Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
+ /// built from an <see cref="StandardTokenizer"/> filtered with
/// <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>, <see cref="LowerCaseFilter"/>,
/// <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
/// provided and <see cref="SnowballFilter"/>. </returns>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/829f8ee7/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
index bcb031a..110790f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
@@ -108,8 +108,8 @@ namespace Lucene.Net.Analysis.CharFilters
}
/// <summary>
- /// Builds the NormalizeCharMap; call this once you
- /// are done calling <seealso cref="#add"/>.
+ /// Builds the <see cref="NormalizeCharMap"/>; call this once you
+ /// are done calling <see cref="Add"/>.
/// </summary>
public virtual NormalizeCharMap Build()
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/829f8ee7/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
index 83a1a46..533b76e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
@@ -53,7 +53,7 @@ namespace Lucene.Net.Analysis.Compound
/// href="CompoundWordTokenFilterBase.html#version"
/// >CompoundWordTokenFilterBase</a> for details. </param>
/// <param name="input">
- /// the <seealso cref="TokenStream"/> to process </param>
+ /// the <see cref="TokenStream"/> to process </param>
/// <param name="hyphenator">
/// the hyphenation pattern tree to use for hyphenation </param>
/// <param name="dictionary">
@@ -74,7 +74,7 @@ namespace Lucene.Net.Analysis.Compound
/// href="CompoundWordTokenFilterBase.html#version"
/// >CompoundWordTokenFilterBase</a> for details. </param>
/// <param name="input">
- /// the <seealso cref="TokenStream"/> to process </param>
+ /// the <see cref="TokenStream"/> to process </param>
/// <param name="hyphenator">
/// the hyphenation pattern tree to use for hyphenation </param>
/// <param name="dictionary">