You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/03 09:33:19 UTC
[8/9] lucenenet git commit: Lucene.Net.Analysis.Hunspell refactor:
member accessibility and documentation comments
Lucene.Net.Analysis.Hunspell refactor: member accessibility and documentation comments
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/da08b94a
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/da08b94a
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/da08b94a
Branch: refs/heads/api-work
Commit: da08b94a1c8cecb77d5fecfa43448987a5592a67
Parents: 50853b4
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 16:26:10 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 16:26:10 2017 +0700
----------------------------------------------------------------------
.../Analysis/Hunspell/Dictionary.cs | 92 ++++++++++----------
.../Analysis/Hunspell/HunspellStemFilter.cs | 28 +++---
.../Hunspell/HunspellStemFilterFactory.cs | 7 +-
.../Analysis/Hunspell/Stemmer.cs | 21 +++--
4 files changed, 73 insertions(+), 75 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/da08b94a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
index 8795529..780fe1c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
@@ -36,7 +36,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// </summary>
public class Dictionary
{
- internal static readonly char[] NOFLAGS = new char[0];
+ private static readonly char[] NOFLAGS = new char[0];
private const string ALIAS_KEY = "AF";
private const string PREFIX_KEY = "PFX";
@@ -105,28 +105,28 @@ namespace Lucene.Net.Analysis.Hunspell
private static Regex whitespacePattern = new Regex("\\s+", RegexOptions.Compiled);
/// <summary>
- /// Creates a new Dictionary containing the information read from the provided InputStreams to hunspell affix
+ /// Creates a new <see cref="Dictionary"/> containing the information read from the provided <see cref="Stream"/>s to hunspell affix
/// and dictionary files.
- /// You have to close the provided InputStreams yourself.
+ /// You have to dispose the provided <see cref="Stream"/>s yourself.
/// </summary>
- /// <param name="affix"> InputStream for reading the hunspell affix file (won't be closed). </param>
- /// <param name="dictionary"> InputStream for reading the hunspell dictionary file (won't be closed). </param>
- /// <exception cref="IOException"> Can be thrown while reading from the InputStreams </exception>
- /// <exception cref="ParseException"> Can be thrown if the content of the files does not meet expected formats </exception>
+ /// <param name="affix"> <see cref="Stream"/> for reading the hunspell affix file (won't be disposed). </param>
+ /// <param name="dictionary"> <see cref="Stream"/> for reading the hunspell dictionary file (won't be disposed). </param>
+ /// <exception cref="IOException"> Can be thrown while reading from the <see cref="Stream"/>s </exception>
+ /// <exception cref="Exception"> Can be thrown if the content of the files does not meet expected formats </exception>
public Dictionary(Stream affix, Stream dictionary)
: this(affix, new List<Stream>() { dictionary }, false)
{
}
/// <summary>
- /// Creates a new Dictionary containing the information read from the provided InputStreams to hunspell affix
+ /// Creates a new <see cref="Dictionary"/> containing the information read from the provided <see cref="Stream"/>s to hunspell affix
/// and dictionary files.
- /// You have to close the provided InputStreams yourself.
+ /// You have to dispose the provided <see cref="Stream"/>s yourself.
/// </summary>
- /// <param name="affix"> InputStream for reading the hunspell affix file (won't be closed). </param>
- /// <param name="dictionaries"> InputStream for reading the hunspell dictionary files (won't be closed). </param>
- /// <exception cref="IOException"> Can be thrown while reading from the InputStreams </exception>
- /// <exception cref="ParseException"> Can be thrown if the content of the files does not meet expected formats </exception>
+ /// <param name="affix"> <see cref="Stream"/> for reading the hunspell affix file (won't be disposed). </param>
+ /// <param name="dictionaries"> <see cref="Stream"/> for reading the hunspell dictionary files (won't be disposed). </param>
+ /// <exception cref="IOException"> Can be thrown while reading from the <see cref="Stream"/>s </exception>
+ /// <exception cref="Exception"> Can be thrown if the content of the files does not meet expected formats </exception>
public Dictionary(Stream affix, IList<Stream> dictionaries, bool ignoreCase)
{
this.ignoreCase = ignoreCase;
@@ -181,24 +181,24 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Looks up HunspellAffix prefixes that have an append that matches the String created from the given char array, offset and length
+ /// Looks up HunspellAffix prefixes that have an append that matches the <see cref="string"/> created from the given <see cref="char"/> array, offset and length
/// </summary>
- /// <param name="word"> Char array to generate the String from </param>
- /// <param name="offset"> Offset in the char array that the String starts at </param>
- /// <param name="length"> Length from the offset that the String is </param>
- /// <returns> List of HunspellAffix prefixes with an append that matches the String, or {@code null} if none are found </returns>
+ /// <param name="word"> <see cref="char"/> array to generate the <see cref="string"/> from </param>
+ /// <param name="offset"> Offset in the <see cref="char"/> array that the <see cref="string"/> starts at </param>
+ /// <param name="length"> Length from the offset that the <see cref="string"/> is </param>
+ /// <returns> List of HunspellAffix prefixes with an append that matches the <see cref="string"/>, or <c>null</c> if none are found </returns>
internal virtual IntsRef LookupPrefix(char[] word, int offset, int length)
{
return Lookup(prefixes, word, offset, length);
}
/// <summary>
- /// Looks up HunspellAffix suffixes that have an append that matches the String created from the given char array, offset and length
+ /// Looks up HunspellAffix suffixes that have an append that matches the <see cref="string"/> created from the given <see cref="char"/> array, offset and length
/// </summary>
- /// <param name="word"> Char array to generate the String from </param>
- /// <param name="offset"> Offset in the char array that the String starts at </param>
- /// <param name="length"> Length from the offset that the String is </param>
- /// <returns> List of HunspellAffix suffixes with an append that matches the String, or {@code null} if none are found </returns>
+ /// <param name="word"> <see cref="char"/> array to generate the <see cref="string"/> from </param>
+ /// <param name="offset"> Offset in the <see cref="char"/> array that the <see cref="string"/> starts at </param>
+ /// <param name="length"> Length from the offset that the <see cref="string"/> is </param>
+ /// <returns> List of HunspellAffix suffixes with an append that matches the <see cref="string"/>, or <c>null</c> if none are found </returns>
internal virtual IntsRef LookupSuffix(char[] word, int offset, int length)
{
return Lookup(suffixes, word, offset, length);
@@ -253,10 +253,10 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Reads the affix file through the provided InputStream, building up the prefix and suffix maps
+ /// Reads the affix file through the provided <see cref="Stream"/>, building up the prefix and suffix maps
/// </summary>
- /// <param name="affixStream"> InputStream to read the content of the affix file from </param>
- /// <param name="decoder"> CharsetDecoder to decode the content of the file </param>
+ /// <param name="affixStream"> <see cref="Stream"/> to read the content of the affix file from </param>
+ /// <param name="decoder"> <see cref="Encoding"/> to decode the content of the file </param>
/// <exception cref="IOException"> Can be thrown while reading from the InputStream </exception>
private void ReadAffixFile(Stream affixStream, Encoding decoder)
{
@@ -393,16 +393,15 @@ namespace Lucene.Net.Analysis.Hunspell
/// <summary>
/// Parses a specific affix rule putting the result into the provided affix map
/// </summary>
- /// <param name="affixes"> Map where the result of the parsing will be put </param>
+ /// <param name="affixes"> <see cref="SortedDictionary{TKey, TValue}"/> where the result of the parsing will be put </param>
/// <param name="header"> Header line of the affix rule </param>
- /// <param name="reader"> BufferedReader to read the content of the rule from </param>
- /// <param name="conditionPattern"> <see cref="String#format(String, Object...)"/> pattern to be used to generate the condition regex
+ /// <param name="reader"> <see cref="TextReader"/> to read the content of the rule from </param>
+ /// <param name="conditionPattern"> <see cref="string.Format(string, object[])"/> pattern to be used to generate the condition regex
/// pattern </param>
/// <param name="seenPatterns"> map from condition -> index of patterns, for deduplication. </param>
/// <exception cref="IOException"> Can be thrown while reading the rule </exception>
private void ParseAffix(SortedDictionary<string, IList<char?>> affixes, string header, TextReader reader, string conditionPattern, IDictionary<string, int?> seenPatterns, IDictionary<string, int?> seenStrips)
{
-
BytesRef scratch = new BytesRef();
StringBuilder sb = new StringBuilder();
string[] args = whitespacePattern.Split(header);
@@ -580,12 +579,12 @@ namespace Lucene.Net.Analysis.Hunspell
internal static readonly Regex ENCODING_PATTERN = new Regex("^(\u00EF\u00BB\u00BF)?SET\\s+", RegexOptions.Compiled);
/// <summary>
- /// Parses the encoding specified in the affix file readable through the provided InputStream
+ /// Parses the encoding specified in the affix file readable through the provided <see cref="Stream"/>
/// </summary>
- /// <param name="affix"> InputStream for reading the affix file </param>
+ /// <param name="affix"> <see cref="Stream"/> for reading the affix file </param>
/// <returns> Encoding specified in the affix file </returns>
- /// <exception cref="IOException"> Can be thrown while reading from the InputStream </exception>
- /// <exception cref="ParseException"> Thrown if the first non-empty non-comment line read from the file does not adhere to the format {@code SET <encoding>} </exception>
+ /// <exception cref="IOException"> Can be thrown while reading from the <see cref="Stream"/> </exception>
+ /// <exception cref="Exception"> Thrown if the first non-empty non-comment line read from the file does not adhere to the format <c>SET &lt;encoding&gt;</c></exception>
internal static string GetDictionaryEncoding(Stream affix)
{
StringBuilder encoding = new StringBuilder();
@@ -632,11 +631,11 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Retrieves the CharsetDecoder for the given encoding. Note, This isn't perfect as I think ISCII-DEVANAGARI and
+ /// Retrieves the <see cref="Encoding"/> for the given encoding. Note: this isn't perfect, as I think ISCII-DEVANAGARI and
/// MICROSOFT-CP1251 etc are allowed...
/// </summary>
- /// <param name="encoding"> Encoding to retrieve the CharsetDecoder for </param>
- /// <returns> CharSetDecoder for the given encoding </returns>
+ /// <param name="encoding"> Encoding to retrieve the <see cref="Encoding"/> instance for </param>
+ /// <returns> <see cref="Encoding"/> for the given encoding <see cref="string"/> </returns>
// LUCENENET NOTE: This was getJavaEncoding in the original
private Encoding GetSystemEncoding(string encoding)
{
@@ -678,7 +677,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// Determines the appropriate <see cref="FlagParsingStrategy"/> based on the FLAG definition line taken from the affix file
/// </summary>
/// <param name="flagLine"> Line containing the flag information </param>
- /// <returns> FlagParsingStrategy that handles parsing flags in the way specified in the FLAG definition </returns>
+ /// <returns> <see cref="FlagParsingStrategy"/> that handles parsing flags in the way specified in the FLAG definition </returns>
internal static FlagParsingStrategy GetFlagParsingStrategy(string flagLine)
{
string[] parts = whitespacePattern.Split(flagLine);
@@ -730,10 +729,10 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Reads the dictionary file through the provided InputStreams, building up the words map
+ /// Reads the dictionary file through the provided <see cref="Stream"/>s, building up the words map
/// </summary>
- /// <param name="dictionaries"> InputStreams to read the dictionary file through </param>
- /// <param name="decoder"> CharsetDecoder used to decode the contents of the file </param>
+ /// <param name="dictionaries"> <see cref="Stream"/>s to read the dictionary file through </param>
+ /// <param name="decoder"> <see cref="Encoding"/> used to decode the contents of the file </param>
/// <exception cref="IOException"> Can be thrown while reading from the file </exception>
private void ReadDictionaryFiles(IList<Stream> dictionaries, Encoding decoder, Builder<IntsRef> words)
{
@@ -1011,11 +1010,10 @@ namespace Lucene.Net.Analysis.Hunspell
/// </summary>
internal abstract class FlagParsingStrategy
{
-
/// <summary>
- /// Parses the given String into a single flag
+ /// Parses the given <see cref="string"/> into a single flag
/// </summary>
- /// <param name="rawFlag"> String to parse into a flag </param>
+ /// <param name="rawFlag"> <see cref="string"/> to parse into a flag </param>
/// <returns> Parsed flag </returns>
internal virtual char ParseFlag(string rawFlag)
{
@@ -1028,15 +1026,15 @@ namespace Lucene.Net.Analysis.Hunspell
}
/// <summary>
- /// Parses the given String into multiple flags
+ /// Parses the given <see cref="string"/> into multiple flags
/// </summary>
- /// <param name="rawFlags"> String to parse into flags </param>
+ /// <param name="rawFlags"> <see cref="string"/> to parse into flags </param>
/// <returns> Parsed flags </returns>
internal abstract char[] ParseFlags(string rawFlags);
}
/// <summary>
- /// Simple implementation of <see cref="FlagParsingStrategy"/> that treats the chars in each String as a individual flags.
+ /// Simple implementation of <see cref="FlagParsingStrategy"/> that treats the chars in each <see cref="string"/> as individual flags.
/// Can be used with both the ASCII and UTF-8 flag types.
/// </summary>
private class SimpleFlagParsingStrategy : FlagParsingStrategy
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/da08b94a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
index da38ef8..0135fad 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
@@ -22,24 +22,24 @@ namespace Lucene.Net.Analysis.Hunspell
*/
/// <summary>
- /// TokenFilter that uses hunspell affix rules and words to stem tokens. Since hunspell supports a word having multiple
- /// stems, this filter can emit multiple tokens for each consumed token
+ /// <see cref="TokenFilter"/> that uses hunspell affix rules and words to stem tokens.
+ /// Since hunspell supports a word having multiple stems, this filter can emit
+ /// multiple tokens for each consumed token
///
/// <para>
/// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
/// certain terms from being passed to the stemmer
- /// <see cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
+ /// <see cref="KeywordAttribute.IsKeyword"/> should be set to <c>true</c>
/// in a previous <see cref="TokenStream"/>.
///
/// Note: For including the original term as well as the stemmed version, see
- /// <see cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+ /// <see cref="Miscellaneous.KeywordRepeatFilterFactory"/>
/// </para>
///
/// @lucene.experimental
/// </summary>
public sealed class HunspellStemFilter : TokenFilter
{
-
private readonly ICharTermAttribute termAtt;
private readonly IPositionIncrementAttribute posIncAtt;
private readonly IKeywordAttribute keywordAtt;
@@ -53,29 +53,29 @@ namespace Lucene.Net.Analysis.Hunspell
/// <summary>
/// Create a <see cref="HunspellStemFilter"/> outputting all possible stems. </summary>
- /// <seealso cref= #HunspellStemFilter(TokenStream, Dictionary, boolean) </seealso>
+ /// <seealso cref="HunspellStemFilter(TokenStream, Dictionary, bool)"/>
public HunspellStemFilter(TokenStream input, Dictionary dictionary)
- : this(input, dictionary, true)
+ : this(input, dictionary, true)
{
}
/// <summary>
/// Create a <see cref="HunspellStemFilter"/> outputting all possible stems. </summary>
- /// <seealso cref= #HunspellStemFilter(TokenStream, Dictionary, boolean, boolean) </seealso>
+ /// <seealso cref="HunspellStemFilter(TokenStream, Dictionary, bool, bool)"/>
public HunspellStemFilter(TokenStream input, Dictionary dictionary, bool dedup)
- : this(input, dictionary, dedup, false)
+ : this(input, dictionary, dedup, false)
{
}
/// <summary>
- /// Creates a new HunspellStemFilter that will stem tokens from the given TokenStream using affix rules in the provided
+ /// Creates a new HunspellStemFilter that will stem tokens from the given <see cref="TokenStream"/> using affix rules in the provided
/// Dictionary
/// </summary>
- /// <param name="input"> TokenStream whose tokens will be stemmed </param>
- /// <param name="dictionary"> HunspellDictionary containing the affix rules and words that will be used to stem the tokens </param>
+ /// <param name="input"> <see cref="TokenStream"/> whose tokens will be stemmed </param>
+ /// <param name="dictionary"> Hunspell <see cref="Dictionary"/> containing the affix rules and words that will be used to stem the tokens </param>
/// <param name="longestOnly"> true if only the longest term should be output. </param>
- public HunspellStemFilter(TokenStream input, Dictionary dictionary, bool dedup, bool longestOnly) :
- base(input)
+ public HunspellStemFilter(TokenStream input, Dictionary dictionary, bool dedup, bool longestOnly)
+ : base(input)
{
this.dedup = dedup && longestOnly == false; // don't waste time deduping if longestOnly is set
this.stemmer = new Stemmer(dictionary);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/da08b94a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
index 4615260..7546740 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Hunspell
*/
/// <summary>
- /// TokenFilterFactory that creates instances of <see cref="HunspellStemFilter"/>.
+ /// <see cref="TokenFilterFactory"/> that creates instances of <see cref="HunspellStemFilter"/>.
/// Example config for British English:
/// <code>
/// <filter class="solr.HunspellStemFilterFactory"
@@ -53,8 +53,9 @@ namespace Lucene.Net.Analysis.Hunspell
private Dictionary dictionary;
/// <summary>
- /// Creates a new HunspellStemFilterFactory </summary>
- public HunspellStemFilterFactory(IDictionary<string, string> args) : base(args)
+ /// Creates a new <see cref="HunspellStemFilterFactory"/> </summary>
+ public HunspellStemFilterFactory(IDictionary<string, string> args)
+ : base(args)
{
dictionaryFiles = Require(args, PARAM_DICTIONARY);
affixFile = Get(args, PARAM_AFFIX);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/da08b94a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs
index 30c052d..60be661 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs
@@ -1,6 +1,5 @@
\ufeffusing Lucene.Net.Analysis.Util;
using Lucene.Net.Store;
-using Lucene.Net.Support;
using Lucene.Net.Util;
using Lucene.Net.Util.Automaton;
using System;
@@ -29,7 +28,7 @@ namespace Lucene.Net.Analysis.Hunspell
*/
/// <summary>
- /// Stemmer uses the affix rules declared in the Dictionary to generate one or more stems for a word. It
+ /// Stemmer uses the affix rules declared in the <see cref="Dictionary"/> to generate one or more stems for a word. It
/// conforms to the algorithm in the original hunspell algorithm, including recursive suffix stripping.
/// </summary>
internal sealed class Stemmer
@@ -44,9 +43,9 @@ namespace Lucene.Net.Analysis.Hunspell
private char[] scratchBuffer = new char[32];
/// <summary>
- /// Constructs a new Stemmer which will use the provided Dictionary to create its stems.
+ /// Constructs a new Stemmer which will use the provided <see cref="Dictionary"/> to create its stems.
/// </summary>
- /// <param name="dictionary"> Dictionary that will be used to create the stems </param>
+ /// <param name="dictionary"> <see cref="Dictionary"/> that will be used to create the stems </param>
public Stemmer(Dictionary dictionary)
{
this.dictionary = dictionary;
@@ -57,7 +56,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// Find the stem(s) of the provided word.
/// </summary>
/// <param name="word"> Word to find the stems for </param>
- /// <returns> List of stems for the word </returns>
+ /// <returns> <see cref="IList{T}"/> of <see cref="CharsRef"/> stems for the word </returns>
public IList<CharsRef> Stem(string word)
{
return Stem(word.ToCharArray(), word.Length);
@@ -67,7 +66,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// Find the stem(s) of the provided word
/// </summary>
/// <param name="word"> Word to find the stems for </param>
- /// <returns> List of stems for the word </returns>
+ /// <returns> <see cref="IList{T}"/> of <see cref="CharsRef"/> stems for the word </returns>
public IList<CharsRef> Stem(char[] word, int length)
{
@@ -101,7 +100,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// Find the unique stem(s) of the provided word
/// </summary>
/// <param name="word"> Word to find the stems for </param>
- /// <returns> List of stems for the word </returns>
+ /// <returns> <see cref="IList{T}"/> of <see cref="CharsRef"/> stems for the word </returns>
public IList<CharsRef> UniqueStems(char[] word, int length)
{
IList<CharsRef> stems = Stem(word, length);
@@ -166,7 +165,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// but two prefixes (COMPLEXPREFIXES) or two suffixes must have continuation requirements to recurse. </param>
/// <param name="circumfix"> true if the previous prefix removal was signed as a circumfix
/// this means inner most suffix must also contain circumfix flag. </param>
- /// <returns> List of stems, or empty list if no stems are found </returns>
+ /// <returns> <see cref="IList{T}"/> of <see cref="CharsRef"/> stems, or empty list if no stems are found </returns>
private IList<CharsRef> Stem(char[] word, int length, int previous, int prevFlag, int prefixFlag, int recursionDepth, bool doPrefix, bool doSuffix, bool previousWasPrefix, bool circumfix)
{
@@ -355,7 +354,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// so we must check dictionary form against both to add it as a stem! </param>
/// <param name="recursionDepth"> current recursion depth </param>
/// <param name="prefix"> true if we are removing a prefix (false if its a suffix) </param>
- /// <returns> List of stems for the word, or an empty list if none are found </returns>
+ /// <returns> <see cref="IList{T}"/> of <see cref="CharsRef"/> stems for the word, or an empty list if none are found </returns>
internal IList<CharsRef> ApplyAffix(char[] strippedWord, int length, int affix, int prefixFlag, int recursionDepth, bool prefix, bool circumfix)
{
// TODO: just pass this in from before, no need to decode it twice
@@ -458,8 +457,8 @@ namespace Lucene.Net.Analysis.Hunspell
/// Checks if the given flag cross checks with the given array of flags
/// </summary>
/// <param name="flag"> Flag to cross check with the array of flags </param>
- /// <param name="flags"> Array of flags to cross check against. Can be {@code null} </param>
- /// <returns> {@code true} if the flag is found in the array or the array is {@code null}, {@code false} otherwise </returns>
+ /// <param name="flags"> Array of flags to cross check against. Can be <c>null</c> </param>
+ /// <returns> <c>true</c> if the flag is found in the array or the array is <c>null</c>, <c>false</c> otherwise </returns>
private bool HasCrossCheckedFlag(char flag, char[] flags, bool matchEmpty)
{
return (flags.Length == 0 && matchEmpty) || Array.BinarySearch(flags, flag) >= 0;