You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/04 20:32:36 UTC
[17/39] lucenenet git commit: Lucene.Net.Analysis: Fixed
miscellaneous documentation comment issues
Lucene.Net.Analysis: Fixed miscellaneous documentation comment issues
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/898a818d
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/898a818d
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/898a818d
Branch: refs/heads/api-work
Commit: 898a818db2ae88b0d8527e34f00da1c780a1a8fd
Parents: e67f797
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sat Feb 4 23:04:14 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sat Feb 4 23:08:23 2017 +0700
----------------------------------------------------------------------
.../Analysis/Br/BrazilianAnalyzer.cs | 1 +
.../Analysis/Cjk/CJKBigramFilter.cs | 8 ++
.../Analysis/El/GreekStemmer.cs | 2 +-
.../Analysis/En/KStemmer.cs | 86 +++++++++-----------
.../Analysis/En/PorterStemmer.cs | 4 +-
.../Analysis/Eu/BasqueAnalyzer.cs | 2 +-
.../Analysis/Fr/FrenchStemmer.cs | 2 +
.../Analysis/Hunspell/Dictionary.cs | 3 +
.../Analysis/Hunspell/HunspellStemFilter.cs | 1 +
.../Analysis/Hunspell/Stemmer.cs | 6 ++
.../Analysis/Miscellaneous/PatternAnalyzer.cs | 4 +-
.../Analysis/Ngram/NGramTokenizer.cs | 4 +-
.../Analysis/Nl/DutchStemFilter.cs | 4 +-
.../Analysis/No/NorwegianLightStemFilter.cs | 4 +-
.../Analysis/No/NorwegianMinimalStemFilter.cs | 1 +
.../Analysis/Ro/RomanianAnalyzer.cs | 1 +
16 files changed, 77 insertions(+), 56 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
index b6bd791..df78414 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
@@ -112,6 +112,7 @@ namespace Lucene.Net.Analysis.Br
/// lucene compatibility version </param>
/// <param name="stopwords">
/// a stopword set </param>
+ /// <param name="stemExclusionSet"> a set of terms not to be stemmed </param>
public BrazilianAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords, CharArraySet stemExclusionSet)
: this(matchVersion, stopwords)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
index 443ea04..9a9c707 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
@@ -107,6 +107,8 @@ namespace Lucene.Net.Analysis.Cjk
/// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, int)">
/// CJKBigramFilter(in, HAN | HIRAGANA | KATAKANA | HANGUL)</see>
/// </summary>
+ /// <param name="in">
+ /// Input <see cref="TokenStream"/> </param>
public CJKBigramFilter(TokenStream @in)
: this(@in, HAN | HIRAGANA | KATAKANA | HANGUL)
{
@@ -116,6 +118,10 @@ namespace Lucene.Net.Analysis.Cjk
/// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, int, bool)">
/// CJKBigramFilter(in, flags, false)</see>
/// </summary>
+ /// <param name="in">
+ /// Input <see cref="TokenStream"/> </param>
+ /// <param name="flags"> OR'ed set from <see cref="CJKBigramFilter.HAN"/>, <see cref="CJKBigramFilter.HIRAGANA"/>,
+ /// <see cref="CJKBigramFilter.KATAKANA"/>, <see cref="CJKBigramFilter.HANGUL"/> </param>
public CJKBigramFilter(TokenStream @in, int flags)
: this(@in, flags, false)
{
@@ -124,6 +130,8 @@ namespace Lucene.Net.Analysis.Cjk
/// <summary>
/// Create a new <see cref="CJKBigramFilter"/>, specifying which writing systems should be bigrammed,
/// and whether or not unigrams should also be output. </summary>
+ /// <param name="in">
+ /// Input <see cref="TokenStream"/> </param>
/// <param name="flags"> OR'ed set from <see cref="CJKBigramFilter.HAN"/>, <see cref="CJKBigramFilter.HIRAGANA"/>,
/// <see cref="CJKBigramFilter.KATAKANA"/>, <see cref="CJKBigramFilter.HANGUL"/> </param>
/// <param name="outputUnigrams"> true if unigrams for the selected writing systems should also be output.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
index 4934410..5c6a40a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
@@ -1000,7 +1000,7 @@ namespace Lucene.Net.Analysis.El
/// </summary>
/// <param name="s"> A char[] array that represents a word. </param>
/// <param name="len"> The length of the char[] array. </param>
- /// <param name="suffix"> A <see cref="String"/> object to check if the word given ends with these characters. </param>
+ /// <param name="suffix"> A <see cref="string"/> object to check if the word given ends with these characters. </param>
/// <returns> True if the word ends with the given suffix, false otherwise. </returns>
private bool EndsWith(char[] s, int len, string suffix)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs
index 1808ced..250af5b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemmer.cs
@@ -700,13 +700,13 @@ namespace Lucene.Net.Analysis.En
* common
*/
- /// <summary>
- ///**
- /// YCS: this was the one place where lookup was not followed by return.
- /// So restructure it. if ((j>0)&&(lookup(word.toString())) &&
- /// !((word.CharAt(j) == 's') && (word.CharAt(j-1) == 's'))) return;
- /// ****
- /// </summary>
+
+ //**
+ // YCS: this was the one place where lookup was not followed by return.
+ // So restructure it. if ((j>0)&&(lookup(word.toString())) &&
+ // !((word.CharAt(j) == 's') && (word.CharAt(j-1) == 's'))) return;
+ // ****
+
bool tryE = j > 0 && !((word.CharAt(j) == 's') && (word.CharAt(j - 1) == 's'));
if (tryE && Lookup())
{
@@ -1877,13 +1877,13 @@ namespace Lucene.Net.Analysis.En
{
get
{
- /// <summary>
- ///*
- /// if (!lookups.contains(word.toString())) { throw new
- /// RuntimeException("didn't look up "+word.toString()+" prev="+prevLookup);
- /// }
- /// **
- /// </summary>
+
+ //*
+ // if (!lookups.contains(word.toString())) { throw new
+ // RuntimeException("didn't look up "+word.toString()+" prev="+prevLookup);
+ // }
+ // **
+
// lookup();
return matchedEntry != null;
}
@@ -1916,15 +1916,13 @@ namespace Lucene.Net.Analysis.En
return false;
}
- /// <summary>
- ///*
- /// caching off is normally faster if (cache == null) initializeStemHash();
- ///
- /// // now check the cache, before we copy chars to "word" if (cache != null)
- /// { String val = cache.get(term, 0, len); if (val != null) { if (val !=
- /// SAME) { result = val; return true; } return false; } }
- /// **
- /// </summary>
+ //*
+ // caching off is normally faster if (cache == null) initializeStemHash();
+ //
+ // // now check the cache, before we copy chars to "word" if (cache != null)
+ // { String val = cache.get(term, 0, len); if (val != null) { if (val !=
+ // SAME) { result = val; return true; } return false; } }
+ // **
word.Reset();
// allocate enough space so that an expansion is never needed
@@ -1942,11 +1940,11 @@ namespace Lucene.Net.Analysis.En
}
matchedEntry = null;
- /// <summary>
- ///*
- /// lookups.clear(); lookups.add(word.toString());
- /// **
- /// </summary>
+
+ //*
+ // lookups.clear(); lookups.add(word.toString());
+ // **
+
/*
* This while loop will never be executed more than one time; it is here
@@ -2053,24 +2051,20 @@ namespace Lucene.Net.Analysis.En
result = entry.root; // may be null, which means that "word" is the stem
}
- /// <summary>
- ///*
- /// caching off is normally faster if (cache != null && cache.size() <
- /// maxCacheSize) { char[] key = new char[len]; System.arraycopy(term, 0,
- /// key, 0, len); if (result != null) { cache.put(key, result); } else {
- /// cache.put(key, word.toString()); } }
- /// **
- /// </summary>
-
- /// <summary>
- ///*
- /// if (entry == null) { if (!word.toString().equals(new String(term,0,len)))
- /// { System.out.println("CASE:" + word.toString() + "," + new
- /// String(term,0,len));
- ///
- /// } }
- /// **
- /// </summary>
+ //*
+ // caching off is normally faster if (cache != null && cache.size() <
+ // maxCacheSize) { char[] key = new char[len]; System.arraycopy(term, 0,
+ // key, 0, len); if (result != null) { cache.put(key, result); } else {
+ // cache.put(key, word.toString()); } }
+ // **
+
+ //*
+ // if (entry == null) { if (!word.toString().equals(new String(term,0,len)))
+ // { System.out.println("CASE:" + word.toString() + "," + new
+ // String(term,0,len));
+ //
+ // } }
+ // **
// no entry matched means result is "word"
return true;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs
index d1119c4..707c90c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs
@@ -54,7 +54,7 @@ namespace Lucene.Net.Analysis.En
///
/// The Stemmer class transforms a word into its root form. The input
/// word can be provided a character at time (by calling <see cref="Add"/>), or at once
- /// by calling one of the various <see cref="Stem"/> methods.
+ /// by calling one of the various Stem methods, such as <see cref="Stem(string)"/>.
/// </summary>
internal class PorterStemmer
{
@@ -71,7 +71,7 @@ namespace Lucene.Net.Analysis.En
/// <summary>
/// <see cref="Reset"/> resets the stemmer so it can stem another word. If you invoke
- /// the stemmer by calling <see cref="Add(char)"/> and then <see cref="Stem"/>, you must call <see cref="Reset"/>
+ /// the stemmer by calling <see cref="Add(char)"/> and then <see cref="Stem()"/>, you must call <see cref="Reset"/>
/// before starting another word.
/// </summary>
public virtual void Reset()
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
index d0fa82d..621c6a6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
@@ -108,7 +108,7 @@ namespace Lucene.Net.Analysis.Eu
/// <summary>
/// Creates a
/// <see cref="Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <see cref="Reader"/>.
+ /// which tokenizes all the text in the provided <see cref="TextReader"/>.
/// </summary>
/// <returns> A
/// <see cref="Analyzer.TokenStreamComponents"/>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
index e67a7fb..2735cb0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
@@ -472,6 +472,8 @@ namespace Lucene.Net.Analysis.Fr
/// <param name="search"> the strings to search for suppression </param>
/// <param name="prefix"> the prefix to add to the search string to test </param>
/// <param name="without"> true if it will be deleted even without prefix found </param>
+ /// <param name="from"> the secondary source zone for search </param>
+ /// <param name="replace"> the replacement string </param>
private void DeleteButSuffixFromElseReplace(string source, string[] search, string prefix, bool without, string from, string replace)
{
if (source != null)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
index 29938e5..a5276f7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
@@ -125,6 +125,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// </summary>
/// <param name="affix"> <see cref="Stream"/> for reading the hunspell affix file (won't be disposed). </param>
/// <param name="dictionaries"> <see cref="Stream"/> for reading the hunspell dictionary files (won't be disposed). </param>
+ /// <param name="ignoreCase"> ignore case? </param>
/// <exception cref="IOException"> Can be thrown while reading from the <see cref="Stream"/>s </exception>
/// <exception cref="Exception"> Can be thrown if the content of the files does not meet expected formats </exception>
public Dictionary(Stream affix, IList<Stream> dictionaries, bool ignoreCase)
@@ -399,6 +400,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// <param name="conditionPattern"> <see cref="string.Format(string, object[])"/> pattern to be used to generate the condition regex
/// pattern </param>
/// <param name="seenPatterns"> map from condition -> index of patterns, for deduplication. </param>
+ /// <param name="seenStrips"></param>
/// <exception cref="IOException"> Can be thrown while reading the rule </exception>
private void ParseAffix(SortedDictionary<string, IList<char?>> affixes, string header, TextReader reader, string conditionPattern, IDictionary<string, int?> seenPatterns, IDictionary<string, int?> seenStrips)
{
@@ -733,6 +735,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// </summary>
/// <param name="dictionaries"> <see cref="Stream"/>s to read the dictionary file through </param>
/// <param name="decoder"> <see cref="Encoding"/> used to decode the contents of the file </param>
+ /// <param name="words"></param>
/// <exception cref="IOException"> Can be thrown while reading from the file </exception>
private void ReadDictionaryFiles(IList<Stream> dictionaries, Encoding decoder, Builder<IntsRef> words)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
index 0135fad..d7f3c97 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
@@ -73,6 +73,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// </summary>
/// <param name="input"> <see cref="TokenStream"/> whose tokens will be stemmed </param>
/// <param name="dictionary"> Hunspell <see cref="Dictionary"/> containing the affix rules and words that will be used to stem the tokens </param>
+ /// <param name="dedup"> remove duplicates </param>
/// <param name="longestOnly"> true if only the longest term should be output. </param>
public HunspellStemFilter(TokenStream input, Dictionary dictionary, bool dedup, bool longestOnly)
: base(input)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs
index 60be661..3b2d0d4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Stemmer.cs
@@ -66,6 +66,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// Find the stem(s) of the provided word
/// </summary>
/// <param name="word"> Word to find the stems for </param>
+ /// <param name="length"> length </param>
/// <returns> <see cref="IList{CharsRef}"/> of stems for the word </returns>
public IList<CharsRef> Stem(char[] word, int length)
{
@@ -100,6 +101,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// Find the unique stem(s) of the provided word
/// </summary>
/// <param name="word"> Word to find the stems for </param>
+ /// <param name="length"> length </param>
/// <returns> <see cref="IList{CharsRef}"/> of stems for the word </returns>
public IList<CharsRef> UniqueStems(char[] word, int length)
{
@@ -154,6 +156,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// Generates a list of stems for the provided word
/// </summary>
/// <param name="word"> Word to generate the stems for </param>
+ /// <param name="length"> length </param>
/// <param name="previous"> previous affix that was removed (so we don't remove the same one twice) </param>
/// <param name="prevFlag"> Flag from a previous stemming step that need to be cross-checked with any affixes in this recursive step </param>
/// <param name="prefixFlag"> flag of the most inner removed prefix, so that when removing a suffix, its also checked against the word </param>
@@ -354,6 +357,8 @@ namespace Lucene.Net.Analysis.Hunspell
/// so we must check dictionary form against both to add it as a stem! </param>
/// <param name="recursionDepth"> current recursion depth </param>
/// <param name="prefix"> true if we are removing a prefix (false if its a suffix) </param>
+ /// <param name="circumfix"> true if the previous prefix removal was signed as a circumfix
+ /// this means inner most suffix must also contain circumfix flag. </param>
/// <returns> <see cref="IList{CharsRef}"/> of stems for the word, or an empty list if none are found </returns>
internal IList<CharsRef> ApplyAffix(char[] strippedWord, int length, int affix, int prefixFlag, int recursionDepth, bool prefix, bool circumfix)
{
@@ -458,6 +463,7 @@ namespace Lucene.Net.Analysis.Hunspell
/// </summary>
/// <param name="flag"> Flag to cross check with the array of flags </param>
/// <param name="flags"> Array of flags to cross check against. Can be <c>null</c> </param>
+ /// <param name="matchEmpty"> If true, will match a zero length flags array. </param>
/// <returns> <c>true</c> if the flag is found in the array or the array is <c>null</c>, <c>false</c> otherwise </returns>
private bool HasCrossCheckedFlag(char flag, char[] flags, bool matchEmpty)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
index f943762..59ce195 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// Efficient Lucene analyzer/tokenizer that preferably operates on a <see cref="string"/> rather than a
/// <see cref="TextReader"/>, that can flexibly separate text into terms via a regular expression <see cref="Regex"/>
- /// (with behaviour similar to <see cref="string.Split(string)"/>),
+ /// (with behaviour similar to <see cref="string.Split(string[], StringSplitOptions)"/>),
/// and that combines the functionality of
/// <see cref="LetterTokenizer"/>,
/// <see cref="LowerCaseTokenizer"/>,
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <para>
/// If you are unsure how exactly a regular expression should look like, consider
/// prototyping by simply trying various expressions on some test texts via
- /// <see cref="string.Split(char[])"/>. Once you are satisfied, give that regex to
+ /// <see cref="string.Split(string[], StringSplitOptions)"/>. Once you are satisfied, give that regex to
/// <see cref="PatternAnalyzer"/>. Also see <a target="_blank"
/// href="http://www.regular-expressions.info/">Regular Expression Tutorial</a>.
/// </para>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
index a6ce01d..b37d290 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
@@ -223,7 +223,7 @@ namespace Lucene.Net.Analysis.Ngram // LUCENENET TODO: Change namespace, directo
// fill in remaining space
exhausted = !charUtils.Fill(charBuffer, m_input, buffer.Length - bufferEnd);
// convert to code points
- bufferEnd += charUtils.toCodePoints(charBuffer.Buffer, 0, charBuffer.Length, buffer, bufferEnd);
+ bufferEnd += charUtils.ToCodePoints(charBuffer.Buffer, 0, charBuffer.Length, buffer, bufferEnd);
}
// should we go to the next offset?
@@ -250,7 +250,7 @@ namespace Lucene.Net.Analysis.Ngram // LUCENENET TODO: Change namespace, directo
continue;
}
- int length = charUtils.toChars(buffer, bufferStart, gramSize, termAtt.Buffer, 0);
+ int length = charUtils.ToChars(buffer, bufferStart, gramSize, termAtt.Buffer, 0);
termAtt.Length = length;
posIncAtt.PositionIncrement = 1;
posLenAtt.PositionLength = 1;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
index 9c280bf..8fd66ac 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
@@ -50,13 +50,15 @@ namespace Lucene.Net.Analysis.Nl
private readonly ICharTermAttribute termAtt;
private readonly IKeywordAttribute keywordAttr;
+ /// <param name="in"> Input <see cref="TokenStream"/> </param>
public DutchStemFilter(TokenStream @in)
- : base(@in)
+ : base(@in)
{
termAtt = AddAttribute<ICharTermAttribute>();
keywordAttr = AddAttribute<IKeywordAttribute>();
}
+ /// <param name="in"> Input <see cref="TokenStream"/> </param>
/// <param name="stemdictionary"> Dictionary of word stem pairs, that overrule the algorithm </param>
public DutchStemFilter(TokenStream @in, IDictionary<string, string> stemdictionary)
: this(@in)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
index 6fb788a..9681e1e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
@@ -38,13 +38,15 @@ namespace Lucene.Net.Analysis.No
/// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, int)"/>
/// - NorwegianLightStemFilter(input, BOKMAAL)
/// </summary>
+ /// <param name="input"> the source <see cref="TokenStream"/> to filter </param>
public NorwegianLightStemFilter(TokenStream input)
- : this(input, NorwegianLightStemmer.BOKMAAL)
+ : this(input, NorwegianLightStemmer.BOKMAAL)
{
}
/// <summary>
/// Creates a new <see cref="NorwegianLightStemFilter"/> </summary>
+ /// <param name="input"> the source <see cref="TokenStream"/> to filter </param>
/// <param name="flags"> set to <see cref="NorwegianLightStemmer.BOKMAAL"/>,
/// <see cref="NorwegianLightStemmer.NYNORSK"/>, or both. </param>
public NorwegianLightStemFilter(TokenStream input, int flags)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
index 520425d..446bf3a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
@@ -45,6 +45,7 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new <see cref="NorwegianLightStemFilter"/> </summary>
+ /// <param name="input"> the source <see cref="TokenStream"/> to filter </param>
/// <param name="flags"> set to <see cref="NorwegianLightStemmer.BOKMAAL"/>,
/// <see cref="NorwegianLightStemmer.NYNORSK"/>, or both. </param>
public NorwegianMinimalStemFilter(TokenStream input, int flags)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/898a818d/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
index af4161c..dca59e8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
@@ -81,6 +81,7 @@ namespace Lucene.Net.Analysis.Ro
/// <summary>
/// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
+ /// <param name="matchVersion"> lucene compatibility version </param>
public RomanianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
{