You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/03 04:41:52 UTC
[05/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - <pre> to <code>,
<seealso> to <see>, Analyzer.T
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs
index 4bf284f..b2dc6cf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Path
///
/// <pre>
/// /something/something/else
- /// </pre>
+ /// </code>
///
/// and make:
///
@@ -36,7 +36,7 @@ namespace Lucene.Net.Analysis.Path
/// /something
/// /something/something
/// /something/something/else
- /// </pre>
+ /// </code>
/// </para>
/// </summary>
public class PathHierarchyTokenizer : Tokenizer
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs
index fa14bef..c25239b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Path/PathHierarchyTokenizerFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Path
*/
/// <summary>
- /// Factory for <seealso cref="PathHierarchyTokenizer"/>.
+ /// Factory for <see cref="PathHierarchyTokenizer"/>.
/// <para>
/// This factory is typically configured for use only in the <code>index</code>
/// Analyzer (or only in the <code>query</code> Analyzer, but never both).
@@ -37,7 +37,7 @@ namespace Lucene.Net.Analysis.Path
/// <code>Books/Fic</code>...
/// </para>
///
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="descendent_path" class="solr.TextField">
/// <analyzer type="index">
/// <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
@@ -46,7 +46,7 @@ namespace Lucene.Net.Analysis.Path
/// <tokenizer class="solr.KeywordTokenizerFactory" />
/// </analyzer>
/// </fieldType>
- /// </pre>
+ /// </code>
/// <para>
/// In this example however we see the oposite configuration, so that a query
/// for <code>Books/NonFic/Science/Physics</code> would match documents
@@ -55,7 +55,7 @@ namespace Lucene.Net.Analysis.Path
/// <code>Books/NonFic/Science/Physics/Theory</code> or
/// <code>Books/NonFic/Law</code>.
/// </para>
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="descendent_path" class="solr.TextField">
/// <analyzer type="index">
/// <tokenizer class="solr.KeywordTokenizerFactory" />
@@ -64,7 +64,7 @@ namespace Lucene.Net.Analysis.Path
/// <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
/// </analyzer>
/// </fieldType>
- /// </pre>
+ /// </code>
/// </summary>
public class PathHierarchyTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
index 0df60fd..8def15d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Path
///
/// <pre>
/// www.site.co.uk
- /// </pre>
+ /// </code>
///
/// and make:
///
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Path
/// site.co.uk
/// co.uk
/// uk
- /// </pre>
+ /// </code>
///
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
index 9887315..a4154db 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Pattern
*/
/// <summary>
- /// Factory for <seealso cref="PatternCaptureGroupTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PatternCaptureGroupTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_ptncapturegroup" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.KeywordTokenizerFactory"/>
/// <filter class="solr.PatternCaptureGroupFilterFactory" pattern="([^a-z])" preserve_original="true"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
/// <seealso cref= PatternCaptureGroupTokenFilter </seealso>
public class PatternCaptureGroupFilterFactory : TokenFilterFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
index 49aaf7e..0f7a367 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternCaptureGroupTokenFilter.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Pattern
/// </code>
/// </para>
/// <para>
- /// plus if <seealso cref="#preserveOriginal"/> is true, it would also return
+ /// plus if <see cref="#preserveOriginal"/> is true, it would also return
/// <code>"camelCaseFilter</code>
/// </para>
/// </summary>
@@ -79,12 +79,12 @@ namespace Lucene.Net.Analysis.Pattern
private int currentMatcher;
/// <param name="input">
- /// the input <seealso cref="TokenStream"/> </param>
+ /// the input <see cref="TokenStream"/> </param>
/// <param name="preserveOriginal">
/// set to true to return the original token even if one of the
/// patterns matches </param>
/// <param name="patterns">
- /// an array of <seealso cref="Pattern"/> objects to match against each token </param>
+ /// an array of <see cref="Pattern"/> objects to match against each token </param>
public PatternCaptureGroupTokenFilter(TokenStream input, bool preserveOriginal, params Regex[] patterns) : base(input)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs
index 1d3a987..ffa4121 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceCharFilterFactory.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.Pattern
*/
/// <summary>
- /// Factory for <seealso cref="PatternReplaceCharFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PatternReplaceCharFilter"/>.
+ /// <code>
/// <fieldType name="text_ptnreplace" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <charFilter class="solr.PatternReplaceCharFilterFactory"
/// pattern="([^a-z])" replacement=""/>
/// <tokenizer class="solr.KeywordTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
///
/// @since Solr 3.1
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs
index 0fdb959..72c0b82 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilter.cs
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis.Pattern
/// Constructs an instance to replace either the first, or all occurances
/// </summary>
/// <param name="in"> the TokenStream to process </param>
- /// <param name="pattern"> the pattern (a <seealso cref="Regex"/> object) to apply to each Token </param>
+ /// <param name="pattern"> the pattern (a <see cref="Regex"/> object) to apply to each Token </param>
/// <param name="replacement"> the "replacement string" to substitute, if null a
/// blank string will be used. Note that this is not the literal
/// string that will be used, '$' and '\' have special meaning. </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs
index 50ed216..b0e3253 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternReplaceFilterFactory.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.Pattern
*/
/// <summary>
- /// Factory for <seealso cref="PatternReplaceFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PatternReplaceFilter"/>.
+ /// <code>
/// <fieldType name="text_ptnreplace" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.KeywordTokenizerFactory"/>
/// <filter class="solr.PatternReplaceFilterFactory" pattern="([^a-z])" replacement=""
/// replace="all"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
/// <seealso cref= PatternReplaceFilter </seealso>
public class PatternReplaceFilterFactory : TokenFilterFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
index c3fa237..d0f80a8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Pattern
/// <para>
/// group=-1 (the default) is equivalent to "split". In this case, the tokens will
/// be equivalent to the output from (without empty tokens):
- /// <seealso cref="String#split(java.lang.String)"/>
+ /// <see cref="String#split(java.lang.String)"/>
/// </para>
/// <para>
/// Using group >= 0 selects the matching group as the token. For example, if you have:<br/>
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Pattern
/// pattern = \'([^\']+)\'
/// group = 0
/// input = aaa 'bbb' 'ccc'
- /// </pre>
+ /// </code>
/// the output will be two tokens: 'bbb' and 'ccc' (including the ' marks). With the same input
/// but using group=1, the output would be: bbb and ccc (no ' marks)
/// </para>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
index 26f9be1..033a3d7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Pattern
*/
/// <summary>
- /// Factory for <seealso cref="PatternTokenizer"/>.
+ /// Factory for <see cref="PatternTokenizer"/>.
/// This tokenizer uses regex pattern matching to construct distinct tokens
/// for the input stream. It takes two arguments: "pattern" and "group".
/// <p/>
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.Pattern
/// <para>
/// group=-1 (the default) is equivalent to "split". In this case, the tokens will
/// be equivalent to the output from (without empty tokens):
- /// <seealso cref="String#split(java.lang.String)"/>
+ /// <see cref="String#split(java.lang.String)"/>
/// </para>
/// <para>
/// Using group >= 0 selects the matching group as the token. For example, if you have:<br/>
@@ -49,7 +49,7 @@ namespace Lucene.Net.Analysis.Pattern
/// </para>
/// <para>NOTE: This Tokenizer does not output tokens that are of zero length.</para>
///
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="text_ptn" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.PatternTokenizerFactory" pattern="\'([^\']+)\'" group="1"/>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs
index 4c0bd2f..1e1b5de 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilter.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Payloads
/// For example, if the delimiter is '|', then for the string "foo|bar", foo is the token
/// and "bar" is a payload.
/// <p/>
- /// Note, you can also include a <seealso cref="org.apache.lucene.analysis.payloads.PayloadEncoder"/> to convert the payload in an appropriate way (from characters to bytes).
+ /// Note, you can also include a <see cref="org.apache.lucene.analysis.payloads.PayloadEncoder"/> to convert the payload in an appropriate way (from characters to bytes).
/// <p/>
/// Note make sure your Tokenizer doesn't split on the delimiter, or this won't work
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs
index cbaf4f4..5db15c9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/DelimitedPayloadTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Factory for <seealso cref="DelimitedPayloadTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="DelimitedPayloadTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_dlmtd" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float" delimiter="|"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class DelimitedPayloadTokenFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs
index 850402f..f3e38af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/FloatEncoder.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Encode a character array Float as a <seealso cref="BytesRef"/>.
+ /// Encode a character array Float as a <see cref="BytesRef"/>.
/// <p/> </summary>
/// <seealso cref= org.apache.lucene.analysis.payloads.PayloadHelper#encodeFloat(float, byte[], int)
///
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs
index c108ed5..879279b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/IntegerEncoder.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Encode a character array Integer as a <seealso cref="BytesRef"/>.
+ /// Encode a character array Integer as a <see cref="BytesRef"/>.
/// <p/>
- /// See <seealso cref="org.apache.lucene.analysis.payloads.PayloadHelper#encodeInt(int, byte[], int)"/>.
+ /// See <see cref="org.apache.lucene.analysis.payloads.PayloadHelper#encodeInt(int, byte[], int)"/>.
///
///
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
index 873b077..5d3997d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilter.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Assigns a payload to a token based on the <seealso cref="org.apache.lucene.analysis.Token#type()"/>
+ /// Assigns a payload to a token based on the <see cref="org.apache.lucene.analysis.Token#type()"/>
///
///
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs
index 0b32784..310ff0a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/NumericPayloadTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Factory for <seealso cref="NumericPayloadTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NumericPayloadTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_numpayload" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.NumericPayloadTokenFilterFactory" payload="24" typeMatch="word"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NumericPayloadTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs
index 060569e..3304dc9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadEncoder.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Payloads
/// <summary>
/// Mainly for use with the DelimitedPayloadTokenFilter, converts char buffers to
- /// <seealso cref="BytesRef"/>.
+ /// <see cref="BytesRef"/>.
/// <p/>
/// NOTE: This interface is subject to change
///
@@ -32,8 +32,8 @@ namespace Lucene.Net.Analysis.Payloads
BytesRef Encode(char[] buffer);
/// <summary>
- /// Convert a char array to a <seealso cref="BytesRef"/> </summary>
- /// <returns> encoded <seealso cref="BytesRef"/> </returns>
+ /// Convert a char array to a <see cref="BytesRef"/> </summary>
+ /// <returns> encoded <see cref="BytesRef"/> </returns>
BytesRef Encode(char[] buffer, int offset, int length);
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs
index 85f9614..7c861f0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/PayloadHelper.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Payloads
}
/// <summary>
- /// Decode the payload that was encoded using <seealso cref="#encodeFloat(float)"/>.
+ /// Decode the payload that was encoded using <see cref="#encodeFloat(float)"/>.
/// NOTE: the length of the array must be at least offset + 4 long. </summary>
/// <param name="bytes"> The bytes to decode </param>
/// <param name="offset"> The offset into the array. </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs
index 9c5b7ff..cffa398 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilter.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Adds the <seealso cref="OffsetAttribute#startOffset()"/>
- /// and <seealso cref="OffsetAttribute#endOffset()"/>
+ /// Adds the <see cref="OffsetAttribute#startOffset()"/>
+ /// and <see cref="OffsetAttribute#endOffset()"/>
/// First 4 bytes are the start
///
///
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs
index 1f411e2..111feef 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TokenOffsetPayloadTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Factory for <seealso cref="TokenOffsetPayloadTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="TokenOffsetPayloadTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_tokenoffset" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.TokenOffsetPayloadTokenFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class TokenOffsetPayloadTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs
index 81868db..2b3b076 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilter.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Makes the <seealso cref="org.apache.lucene.analysis.Token#type()"/> a payload.
+ /// Makes the <see cref="org.apache.lucene.analysis.Token#type()"/> a payload.
///
- /// Encodes the type using <seealso cref="String#getBytes(String)"/> with "UTF-8" as the encoding
+ /// Encodes the type using <see cref="String#getBytes(String)"/> with "UTF-8" as the encoding
///
///
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs
index 5f14bf6..a990d0a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/TypeAsPayloadTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Payloads
*/
/// <summary>
- /// Factory for <seealso cref="TypeAsPayloadTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="TypeAsPayloadTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_typeaspayload" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.TypeAsPayloadTokenFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class TypeAsPayloadTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs
index 9e103b7..2c1ae97 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Position
/// Set the positionIncrement of all tokens to the "positionIncrement",
/// except the first return token which retains its original positionIncrement value.
/// The default positionIncrement value is zero. </summary>
- /// @deprecated (4.4) PositionFilter makes <seealso cref="TokenStream"/> graphs inconsistent
+ /// @deprecated (4.4) PositionFilter makes <see cref="TokenStream"/> graphs inconsistent
/// which can cause highlighting bugs. Its main use-case being to make
/// <a href="{@docRoot}/../queryparser/overview-summary.html">QueryParser</a>
/// generate boolean queries instead of phrase queries, it is now advised to use
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs
index 2fc4993..dfe4d46 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Position/PositionFilterFactory.cs
@@ -23,16 +23,16 @@ namespace Lucene.Net.Analysis.Position
*/
/// <summary>
- /// Factory for <seealso cref="PositionFilter"/>.
+ /// Factory for <see cref="PositionFilter"/>.
/// Set the positionIncrement of all tokens to the "positionIncrement", except the first return token which retains its
/// original positionIncrement value. The default positionIncrement value is zero.
- /// <pre class="prettyprint">
+ /// <code>
/// <fieldType name="text_position" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.PositionFilterFactory" positionIncrement="0"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
/// <seealso cref=PositionFilter/>
[Obsolete("(4.4)")]
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
index c33bab5..a51dae7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
@@ -28,11 +28,11 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Portuguese.
+ /// <see cref="Analyzer"/> for Portuguese.
/// <para>
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating PortugueseAnalyzer:
/// <ul>
/// <li> As of 3.6, PortugueseLightStemFilter is used for less aggressive stemming.
@@ -87,7 +87,7 @@ namespace Lucene.Net.Analysis.Pt
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public PortugueseAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -106,7 +106,7 @@ namespace Lucene.Net.Analysis.Pt
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -120,15 +120,15 @@ namespace Lucene.Net.Analysis.Pt
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="PortugueseLightStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="PortugueseLightStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs
index c079281..e557bff 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="PortugueseLightStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="PortugueseLightStemmer"/> to stem
/// Portuguese words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class PortugueseLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs
index a0bf456..12dbdfd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseLightStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// Factory for <seealso cref="PortugueseLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PortugueseLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_ptlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.PortugueseLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PortugueseLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs
index cfd6e46..6f63d4c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="PortugueseMinimalStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="PortugueseMinimalStemmer"/> to stem
/// Portuguese words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class PortugueseMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs
index 0cb0acd..db1927f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseMinimalStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// Factory for <seealso cref="PortugueseMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PortugueseMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_ptminstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.PortugueseMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PortugueseMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs
index 88a58ad..560b64a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="PortugueseStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="PortugueseStemmer"/> to stem
/// Portuguese words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class PortugueseStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs
index 54e6dc8..46a5a67 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Pt
*/
/// <summary>
- /// Factory for <seealso cref="PortugueseStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PortugueseStemFilter"/>.
+ /// <code>
/// <fieldType name="text_ptstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.PortugueseStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PortugueseStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
index 39e21c0..8e72225 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Pt
/// <para>
/// Since this time a plural-only modification (RSLP-S) as well as a modification
/// for the Galician language have been implemented. This class parses a configuration
- /// file that describes <seealso cref="Step"/>s, where each Step contains a set of <seealso cref="Rule"/>s.
+ /// file that describes <see cref="Step"/>s, where each Step contains a set of <see cref="Rule"/>s.
/// </para>
/// <para>
/// The general rule format is:
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs
index 991e4ed..1b8b913 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Query/QueryAutoStopWordAnalyzer.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Query
*/
/// <summary>
- /// An <seealso cref="Analyzer"/> used primarily at query time to wrap another analyzer and provide a layer of protection
+ /// An <see cref="Analyzer"/> used primarily at query time to wrap another analyzer and provide a layer of protection
/// which prevents very common words from being passed into queries.
/// <para>
/// For very large indexes the cost
@@ -47,9 +47,9 @@ namespace Lucene.Net.Analysis.Query
/// <summary>
/// Creates a new QueryAutoStopWordAnalyzer with stopwords calculated for all
/// indexed fields from terms with a document frequency percentage greater than
- /// <seealso cref="#defaultMaxDocFreqPercent"/>
+ /// <see cref="defaultMaxDocFreqPercent"/>
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <exception cref="IOException"> Can be thrown while reading from the IndexReader </exception>
@@ -63,7 +63,7 @@ namespace Lucene.Net.Analysis.Query
/// indexed fields from terms with a document frequency greater than the given
/// maxDocFreq
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <param name="maxDocFreq"> Document frequency terms should be above in order to be stopwords </param>
@@ -78,7 +78,7 @@ namespace Lucene.Net.Analysis.Query
/// indexed fields from terms with a document frequency percentage greater than
/// the given maxPercentDocs
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <param name="maxPercentDocs"> The maximum percentage (between 0.0 and 1.0) of index documents which
@@ -94,7 +94,7 @@ namespace Lucene.Net.Analysis.Query
/// given selection of fields from terms with a document frequency percentage
/// greater than the given maxPercentDocs
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <param name="fields"> Selection of fields to calculate stopwords for </param>
@@ -111,7 +111,7 @@ namespace Lucene.Net.Analysis.Query
/// given selection of fields from terms with a document frequency greater than
/// the given maxDocFreq
/// </summary>
- /// <param name="matchVersion"> Version to be used in <seealso cref="StopFilter"/> </param>
+ /// <param name="matchVersion"> Version to be used in <see cref="StopFilter"/> </param>
/// <param name="delegate"> Analyzer whose TokenStream will be filtered </param>
/// <param name="indexReader"> IndexReader to identify the stopwords from </param>
/// <param name="fields"> Selection of fields to calculate stopwords for </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
index 8179914..117be89 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Reverse
/// wildcards search.
/// </para>
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating ReverseStringFilter, or when using any of
/// its static methods:
/// <ul>
@@ -68,27 +68,27 @@ namespace Lucene.Net.Analysis.Reverse
/// <summary>
/// Create a new ReverseStringFilter that reverses all tokens in the
- /// supplied <seealso cref="TokenStream"/>.
+ /// supplied <see cref="TokenStream"/>.
/// <para>
/// The reversed tokens will not be marked.
/// </para>
/// </summary>
/// <param name="matchVersion"> See <a href="#version">above</a> </param>
- /// <param name="in"> <seealso cref="TokenStream"/> to filter </param>
+ /// <param name="in"> <see cref="TokenStream"/> to filter </param>
public ReverseStringFilter(LuceneVersion matchVersion, TokenStream @in) : this(matchVersion, @in, NOMARKER)
{
}
/// <summary>
/// Create a new ReverseStringFilter that reverses and marks all tokens in the
- /// supplied <seealso cref="TokenStream"/>.
+ /// supplied <see cref="TokenStream"/>.
/// <para>
/// The reversed tokens will be prepended (marked) by the <code>marker</code>
/// character.
/// </para>
/// </summary>
/// <param name="matchVersion"> See <a href="#version">above</a> </param>
- /// <param name="in"> <seealso cref="TokenStream"/> to filter </param>
+ /// <param name="in"> <see cref="TokenStream"/> to filter </param>
/// <param name="marker"> A character used to mark reversed tokens </param>
public ReverseStringFilter(LuceneVersion matchVersion, TokenStream @in, char marker) : base(@in)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs
index fd69e30..ee3ce61 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Reverse
*/
/// <summary>
- /// Factory for <seealso cref="ReverseStringFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ReverseStringFilter"/>.
+ /// <code>
/// <fieldType name="text_rvsstr" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ReverseStringFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
///
/// @since solr 1.4
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
index 9dce193..6c0d9d3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Ro
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Romanian.
+ /// <see cref="Analyzer"/> for Romanian.
/// </summary>
public sealed class RomanianAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Ro
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public RomanianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Ro
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Ro
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
index 62ad10a..e62f65b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
@@ -29,14 +29,14 @@ namespace Lucene.Net.Analysis.Ru
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Russian language.
+ /// <see cref="Analyzer"/> for Russian language.
/// <para>
/// Supports an external list of stopwords (words that
/// will not be indexed at all).
/// A default set of stopwords is used unless an alternative list is specified.
/// </para>
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating RussianAnalyzer:
/// <ul>
/// <li> As of 3.1, StandardTokenizer is used, Snowball stemming is done with
@@ -133,14 +133,14 @@ namespace Lucene.Net.Analysis.Ru
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided, and <seealso cref="SnowballFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided, and <see cref="SnowballFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
#pragma warning disable 612, 618
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs
index 15db0f7..1ffa004 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizer.cs
@@ -24,20 +24,20 @@ namespace Lucene.Net.Analysis.Ru
*/
/// <summary>
- /// A RussianLetterTokenizer is a <seealso cref="Tokenizer"/> that extends <seealso cref="LetterTokenizer"/>
+ /// A RussianLetterTokenizer is a <see cref="Tokenizer"/> that extends <see cref="LetterTokenizer"/>
/// by also allowing the basic Latin digits 0-9.
/// <para>
/// <a name="version"/>
- /// You must specify the required <seealso cref="Version"/> compatibility when creating
- /// <seealso cref="RussianLetterTokenizer"/>:
+ /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
+ /// <see cref="RussianLetterTokenizer"/>:
/// <ul>
- /// <li>As of 3.1, <seealso cref="CharTokenizer"/> uses an int based API to normalize and
- /// detect token characters. See <seealso cref="CharTokenizer#isTokenChar(int)"/> and
- /// <seealso cref="CharTokenizer#normalize(int)"/> for details.</li>
+ /// <li>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+ /// detect token characters. See <see cref="CharTokenizer.IsTokenChar(int)"/> and
+ /// <see cref="CharTokenizer.Normalize(int)"/> for details.</li>
/// </ul>
/// </para>
/// </summary>
- /// @deprecated (3.1) Use <seealso cref="StandardTokenizer"/> instead, which has the same functionality.
+ /// @deprecated (3.1) Use <see cref="StandardTokenizer"/> instead, which has the same functionality.
/// This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use StandardTokenizer instead, which has the same functionality.")]
public class RussianLetterTokenizer : CharTokenizer
@@ -46,7 +46,7 @@ namespace Lucene.Net.Analysis.Ru
private const int DIGIT_9 = '9';
/// Construct a new RussianLetterTokenizer. * <param name="matchVersion"> Lucene version
- /// to match See <seealso cref="<a href="#version">above</a>"/>
+ /// to match See <a href="#version">above</a>
/// </param>
/// <param name="in">
/// the input to split up into tokens </param>
@@ -57,12 +57,12 @@ namespace Lucene.Net.Analysis.Ru
/// <summary>
/// Construct a new RussianLetterTokenizer using a given
- /// <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>. * @param
+ /// <see cref="AttributeSource.AttributeFactory"/>. * @param
/// matchVersion Lucene version to match See
- /// <seealso cref="<a href="#version">above</a>"/>
+ /// <a href="#version">above</a>
/// </summary>
/// <param name="factory">
- /// the attribute factory to use for this <seealso cref="Tokenizer"/> </param>
+ /// the attribute factory to use for this <see cref="Tokenizer"/> </param>
/// <param name="in">
/// the input to split up into tokens </param>
public RussianLetterTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader @in)
@@ -72,7 +72,7 @@ namespace Lucene.Net.Analysis.Ru
/// <summary>
/// Collects only characters which satisfy
- /// <seealso cref="Character#isLetter(int)"/>.
+ /// <see cref="Character.IsLetter(int)"/>.
/// </summary>
protected override bool IsTokenChar(int c)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs
index 20f9142..16b09c2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLetterTokenizerFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Ru
* limitations under the License.
*/
- /// @deprecated Use <seealso cref="org.apache.lucene.analysis.standard.StandardTokenizerFactory"/> instead.
+ /// @deprecated Use <see cref="StandardTokenizerFactory"/> instead.
/// This tokenizer has no Russian-specific functionality.
[Obsolete("Use StandardTokenizerFactory instead.")]
public class RussianLetterTokenizerFactory : TokenizerFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs
index 74ff113..66a1599 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Ru
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="RussianLightStemmer"/> to stem Russian
+ /// A <see cref="TokenFilter"/> that applies <see cref="RussianLightStemmer"/> to stem Russian
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class RussianLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs
index 7def611..8073c78 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianLightStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Ru
*/
/// <summary>
- /// Factory for <seealso cref="RussianLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="RussianLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_rulgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.RussianLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class RussianLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs
index 34f8063..b3634dc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleAnalyzerWrapper.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Shingle
*/
/// <summary>
- /// A ShingleAnalyzerWrapper wraps a <seealso cref="ShingleFilter"/> around another <seealso cref="Analyzer"/>.
+ /// A ShingleAnalyzerWrapper wraps a <see cref="ShingleFilter"/> around another <see cref="Analyzer"/>.
/// <para>
/// A shingle is another name for a token based n-gram.
/// </para>
@@ -97,7 +97,7 @@ namespace Lucene.Net.Analysis.Shingle
}
/// <summary>
- /// Wraps <seealso cref="StandardAnalyzer"/>.
+ /// Wraps <see cref="StandardAnalyzer"/>.
/// </summary>
public ShingleAnalyzerWrapper(LuceneVersion matchVersion)
: this(matchVersion, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE)
@@ -105,7 +105,7 @@ namespace Lucene.Net.Analysis.Shingle
}
/// <summary>
- /// Wraps <seealso cref="StandardAnalyzer"/>.
+ /// Wraps <see cref="StandardAnalyzer"/>.
/// </summary>
public ShingleAnalyzerWrapper(LuceneVersion matchVersion, int minShingleSize, int maxShingleSize)
: this(new StandardAnalyzer(matchVersion), minShingleSize, maxShingleSize)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs
index 61348b4..19b07a0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilter.cs
@@ -165,7 +165,7 @@ namespace Lucene.Net.Analysis.Shingle
/// <summary>
/// Constructs a ShingleFilter with the specified shingle size from the
- /// <seealso cref="TokenStream"/> <code>input</code>
+ /// <see cref="TokenStream"/> <code>input</code>
/// </summary>
/// <param name="input"> input stream </param>
/// <param name="minShingleSize"> minimum shingle size produced by the filter. </param>
@@ -184,7 +184,7 @@ namespace Lucene.Net.Analysis.Shingle
/// <summary>
/// Constructs a ShingleFilter with the specified shingle size from the
- /// <seealso cref="TokenStream"/> <code>input</code>
+ /// <see cref="TokenStream"/> <code>input</code>
/// </summary>
/// <param name="input"> input stream </param>
/// <param name="maxShingleSize"> maximum shingle size produced by the filter. </param>
@@ -378,7 +378,7 @@ namespace Lucene.Net.Analysis.Shingle
/// <para>Get the next token from the input stream.
/// </para>
/// <para>If the next token has <code>positionIncrement > 1</code>,
- /// <code>positionIncrement - 1</code> <seealso cref="#fillerToken"/>s are
+ /// <code>positionIncrement - 1</code> <see cref="fillerToken"/>s are
/// inserted first.
/// </para>
/// </summary>
@@ -496,10 +496,10 @@ namespace Lucene.Net.Analysis.Shingle
}
/// <summary>
- /// <para>Fills <seealso cref="#inputWindow"/> with input stream tokens, if available,
+ /// <para>Fills <see cref="inputWindow"/> with input stream tokens, if available,
/// shifting to the right if the window was previously full.
/// </para>
- /// <para>Resets <seealso cref="#gramSize"/> to its minimum value.
+ /// <para>Resets <see cref="gramSize"/> to its minimum value.
///
/// </para>
/// </summary>
@@ -570,13 +570,13 @@ namespace Lucene.Net.Analysis.Shingle
/// <summary>
/// <para>An instance of this class is used to maintain the number of input
/// stream tokens that will be used to compose the next unigram or shingle:
- /// <seealso cref="#gramSize"/>.
+ /// <see cref="gramSize"/>.
/// </para>
/// <para><code>gramSize</code> will take on values from the circular sequence
- /// <b>{ [ 1, ] <seealso cref="#minShingleSize"/> [ , ... , <seealso cref="#maxShingleSize"/> ] }</b>.
+ /// <b>{ [ 1, ] <see cref="minShingleSize"/> [ , ... , <see cref="maxShingleSize"/> ] }</b>.
/// </para>
/// <para>1 is included in the circular sequence only if
- /// <seealso cref="#outputUnigrams"/> = true.
+ /// <see cref="outputUnigrams"/> = true.
/// </para>
/// </summary>
private class CircularSequence
@@ -608,10 +608,10 @@ namespace Lucene.Net.Analysis.Shingle
/// <para>Increments this circular number's value to the next member in the
/// circular sequence
/// <code>gramSize</code> will take on values from the circular sequence
- /// <b>{ [ 1, ] <seealso cref="#minShingleSize"/> [ , ... , <seealso cref="#maxShingleSize"/> ] }</b>.
+ /// <b>{ [ 1, ] <see cref="minShingleSize"/> [ , ... , <see cref="maxShingleSize"/> ] }</b>.
/// </para>
/// <para>1 is included in the circular sequence only if
- /// <seealso cref="#outputUnigrams"/> = true.
+ /// <see cref="outputUnigrams"/> = true.
/// </para>
/// </summary>
public virtual void advance()
@@ -636,10 +636,10 @@ namespace Lucene.Net.Analysis.Shingle
/// circular sequence
/// </para>
/// <para><code>gramSize</code> will take on values from the circular sequence
- /// <b>{ [ 1, ] <seealso cref="#minShingleSize"/> [ , ... , <seealso cref="#maxShingleSize"/> ] }</b>.
+ /// <b>{ [ 1, ] <see cref="minShingleSize"/> [ , ... , <see cref="maxShingleSize"/> ] }</b>.
/// </para>
/// <para>1 is included in the circular sequence only if
- /// <seealso cref="#outputUnigrams"/> = true.
+ /// <see cref="outputUnigrams"/> = true.
/// </para>
/// </summary>
public virtual void reset()
@@ -651,8 +651,8 @@ namespace Lucene.Net.Analysis.Shingle
/// <para>Returns true if the current value is the first member of the circular
/// sequence.
/// </para>
- /// <para>If <seealso cref="#outputUnigrams"/> = true, the first member of the circular
- /// sequence will be 1; otherwise, it will be <seealso cref="#minShingleSize"/>.
+ /// <para>If <see cref="outputUnigrams"/> = true, the first member of the circular
+ /// sequence will be 1; otherwise, it will be <see cref="minShingleSize"/>.
///
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs
index 9dac23f..782fb83 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Shingle/ShingleFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Shingle
*/
/// <summary>
- /// Factory for <seealso cref="ShingleFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ShingleFilter"/>.
+ /// <code>
/// <fieldType name="text_shingle" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ShingleFilterFactory" minShingleSize="2" maxShingleSize="2"
/// outputUnigrams="true" outputUnigramsIfNoShingles="false" tokenSeparator=" " fillerToken="_"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ShingleFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs
index 3abd14b..bb31ae6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/DateRecognizerSinkFilter.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Sinks
*/
/// <summary>
- /// Attempts to parse the <seealso cref="CharTermAttribute.ToString()"/> as a Date using either the
+ /// Attempts to parse the <see cref="CharTermAttribute.ToString()"/> as a Date using either the
/// <see cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/> or
/// <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/> methods.
/// If a format is passed, <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the current culture and <see cref="DateTimeStyles.None"/>.
- /// Loosely matches standard DateTime formats using <seealso cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Loosely matches standard DateTime formats using <see cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
public DateRecognizerSinkFilter()
: this((string[])null, DateTimeFormatInfo.CurrentInfo, DateTimeStyles.None)
@@ -47,7 +47,7 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied culture and <see cref="DateTimeStyles.None"/>.
- /// Loosely matches standard DateTime formats using <seealso cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Loosely matches standard DateTime formats using <see cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
/// <param name="culture">An object that supplies culture-specific format information</param>
public DateRecognizerSinkFilter(IFormatProvider culture)
@@ -56,9 +56,9 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the current culture and <see cref="DateTimeStyles.None"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="format">The allowable format of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="format">The allowable format of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, it must match the format of the date exactly to get a match.</param>
public DateRecognizerSinkFilter(string format)
: this(new string[] { format }, DateTimeFormatInfo.CurrentInfo, DateTimeStyles.None)
@@ -66,9 +66,9 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the current culture and <see cref="DateTimeStyles.None"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="formats">An array of allowable formats of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="formats">An array of allowable formats of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, one of them must match the format of the date exactly to get a match.</param>
public DateRecognizerSinkFilter(string[] formats)
: this(formats, DateTimeFormatInfo.CurrentInfo, DateTimeStyles.None)
@@ -76,21 +76,21 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied culture and <see cref="DateTimeStyles"/>.
- /// Loosely matches standard DateTime formats using <seealso cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Loosely matches standard DateTime formats using <see cref="DateTime.TryParse(string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
/// If supplied, one of them must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
/// <param name="style">A bitwise combination of enumeration values that indicates the permitted format of s.
- /// A typical value to specify is <seealso cref="DateTimeStyles.None"/></param>
+ /// A typical value to specify is <see cref="DateTimeStyles.None"/></param>
public DateRecognizerSinkFilter(IFormatProvider culture, DateTimeStyles style)
:this((string[])null, culture, style)
{ }
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied format, culture and <see cref="DateTimeStyles.None"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="format">The allowable format of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="format">The allowable format of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, it must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
public DateRecognizerSinkFilter(string format, IFormatProvider culture)
@@ -99,9 +99,9 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied formats, culture and <see cref="DateTimeStyles.None"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="formats">An array of allowable formats of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="formats">An array of allowable formats of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, one of them must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
public DateRecognizerSinkFilter(string[] formats, IFormatProvider culture)
@@ -110,26 +110,26 @@ namespace Lucene.Net.Analysis.Sinks
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied format, culture and <see cref="DateTimeStyles"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string, IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="format">The allowable format of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="format">The allowable format of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, it must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
/// <param name="style">A bitwise combination of enumeration values that indicates the permitted format of s.
- /// A typical value to specify is <seealso cref="DateTimeStyles.None"/></param>
+ /// A typical value to specify is <see cref="DateTimeStyles.None"/></param>
public DateRecognizerSinkFilter(string format, IFormatProvider culture, DateTimeStyles style)
: this(new string[] { format }, culture, style)
{ }
/// <summary>
/// Creates a new instance of <see cref="DateRecognizerSinkFilter"/> using the supplied formats, culture and <see cref="DateTimeStyles"/>.
- /// Strictly matches the supplied DateTime formats using <seealso cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
+ /// Strictly matches the supplied DateTime formats using <see cref="DateTime.TryParseExact(string, string[], IFormatProvider, DateTimeStyles, out DateTime)"/>.
/// </summary>
- /// <param name="formats">An array of allowable formats of the <seealso cref="CharTermAttribute.ToString()"/>.
+ /// <param name="formats">An array of allowable formats of the <see cref="CharTermAttribute.ToString()"/>.
/// If supplied, one of them must match the format of the date exactly to get a match.</param>
/// <param name="culture">An object that supplies culture-specific format information</param>
/// <param name="style">A bitwise combination of enumeration values that indicates the permitted format of s.
- /// A typical value to specify is <seealso cref="DateTimeStyles.None"/></param>
+ /// A typical value to specify is <see cref="DateTimeStyles.None"/></param>
public DateRecognizerSinkFilter(string[] formats, IFormatProvider culture, DateTimeStyles style)
{
this.m_culture = culture;