You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by di...@apache.org on 2009/12/14 20:06:28 UTC
svn commit: r890443 [1/3] - in /incubator/lucene.net/trunk/C#/src:
Lucene.Net/Analysis/ Lucene.Net/Analysis/Standard/
Lucene.Net/Analysis/Tokenattributes/ Lucene.Net/Document/ Lucene.Net/Index/
Lucene.Net/QueryParser/ Lucene.Net/Search/ Lucene.Net/Sear...
Author: digy
Date: Mon Dec 14 19:05:31 2009
New Revision: 890443
URL: http://svn.apache.org/viewvc?rev=890443&view=rev
Log:
Some XML documentation clean up
Modified:
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CompoundFileWriter.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfos.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SnapshotDeletionPolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermPositions.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/QueryParser/MultiFieldQueryParser.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/QueryParser/QueryParser.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/BooleanQuery.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/BooleanScorer2.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Collector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ComplexExplanation.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DefaultSimilarity.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DisjunctionMaxQuery.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DisjunctionSumScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DocIdSetIterator.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Explanation.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCache.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldComparator.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ByteFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/DocValues.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/FieldCacheSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/FloatFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/IntFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/MultiValueSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/OrdFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ReverseOrdFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ShortFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FuzzyTermEnum.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/HitCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/HitQueue.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Hits.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/IndexSearcher.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/NumericRangeQuery.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/PhraseScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Query.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ReqExclScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ReqOptSumScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ScoreCachingWrappingScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ScoreDocComparator.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Scorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Searchable.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Searcher.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Similarity.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/SloppyPhraseScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Sort.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/NearSpansOrdered.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/Spans.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TermScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TimeLimitedCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TimeLimitingCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopDocsCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopFieldCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Weight.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/Directory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FSDirectory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FileSwitchDirectory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/LockFactory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/NativeFSLockFactory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/SimpleFSLockFactory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/FieldCacheSanityChecker.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/PriorityQueue.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/SmallFloat.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/SortedVIntList.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/UnicodeUtil.cs
incubator/lucene.net/trunk/C#/src/Test/Search/CheckHits.cs
incubator/lucene.net/trunk/C#/src/Test/Search/TestDisjunctionMaxQuery.cs
incubator/lucene.net/trunk/C#/src/Test/Search/TestExplanations.cs
incubator/lucene.net/trunk/C#/src/Test/Util/LocalizedTestCase.cs
incubator/lucene.net/trunk/C#/src/Test/Util/LuceneTestCase.cs
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs Mon Dec 14 19:05:31 2009
@@ -31,29 +31,29 @@
/// those characters with reasonable ASCII alternatives are converted:
///
/// <ul>
- /// <li>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a>
- /// <li>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a>
- /// <li>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a>
- /// <li>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a>
- /// <li>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a>
- /// <li>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a>
- /// <li>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a>
- /// <li>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a>
- /// <li>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a>
- /// <li>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a>
- /// <li>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a>
- /// <li>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a>
- /// <li>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a>
- /// <li>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a>
- /// <li>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a>
- /// <li>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a>
+ /// <li>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></li>
+ /// <li>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></li>
+ /// <li>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></li>
+ /// <li>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></li>
+ /// <li>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a></li>
+ /// <li>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a></li>
+ /// <li>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a></li>
+ /// <li>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a></li>
+ /// <li>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a></li>
+ /// <li>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a></li>
+ /// <li>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a></li>
+ /// <li>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a></li>
+ /// <li>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a></li>
+ /// <li>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></li>
+ /// <li>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></li>
+ /// <li>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></li>
/// </ul>
///
/// See: <a href="http://en.wikipedia.org/wiki/Latin_characters_in_Unicode">http://en.wikipedia.org/wiki/Latin_characters_in_Unicode</a>
///
/// The set of character conversions supported by this class is a superset of
/// those supported by Lucene's {@link ISOLatin1AccentFilter} which strips
- /// accents from Latin1 characters. For example, 'à' will be replaced by
+ /// accents from Latin1 characters. For example, '&agrave;' will be replaced by
/// 'a'.
/// </summary>
public sealed class ASCIIFoldingFilter:TokenFilter
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/NumericTokenStream.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs Mon Dec 14 19:05:31 2009
@@ -44,7 +44,7 @@
/// should use this class.
///
/// <p/>See {@link NumericField} for capabilities of fields
- /// indexed numerically.</p>
+ /// indexed numerically.<p/>
///
/// <p/>Here's an example usage, for an <code>int</code> field:
///
@@ -74,17 +74,17 @@
///
/// <p/>This stream is not intended to be used in analyzers;
/// it's more for iterating the different precisions during
- /// indexing a specific numeric value.</p>
+ /// indexing a specific numeric value.<p/>
///
/// <p/><b>NOTE</b>: as token streams are only consumed once
/// the document is added to the index, if you index more
/// than one numeric field, use a separate <code>NumericTokenStream</code>
- /// instance for each.</p>
+ /// instance for each.<p/>
///
/// <p/>See {@link NumericRangeQuery} for more details on the
/// <a
/// href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- /// parameter as well as how numeric fields work under the hood.</p>
+ /// parameter as well as how numeric fields work under the hood.<p/>
///
/// <p/><font color="red"><b>NOTE:</b> This API is experimental and
/// might change in incompatible ways in the next release.</font>
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs Mon Dec 14 19:05:31 2009
@@ -32,9 +32,9 @@
/// You must specify the required {@link Version} compatibility when creating
/// StandardAnalyzer:
/// <ul>
- /// <li>As of 2.9, StopFilter preserves position increments
+ /// <li>As of 2.9, StopFilter preserves position increments</li>
/// <li>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
- /// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1608</a>
+ /// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a></li>
/// </ul>
///
/// </summary>
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs Mon Dec 14 19:05:31 2009
@@ -36,10 +36,10 @@
///
/// <ul>
/// <li>Splits words at punctuation characters, removing punctuation. However, a
- /// dot that's not followed by whitespace is considered part of a token.
+ /// dot that's not followed by whitespace is considered part of a token.</li>
/// <li>Splits words at hyphens, unless there's a number in the token, in which case
- /// the whole token is interpreted as a product number and is not split.
- /// <li>Recognizes email addresses and internet hostnames as one token.
+ /// the whole token is interpreted as a product number and is not split.</li>
+ /// <li>Recognizes email addresses and internet hostnames as one token.</li>
/// </ul>
///
/// <p/>Many applications have specific tokenizer needs. If this tokenizer does
@@ -52,7 +52,7 @@
/// StandardAnalyzer:
/// <ul>
/// <li>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
- /// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1608</a>
+ /// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a></li>
/// </ul>
/// </summary>
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Token.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs Mon Dec 14 19:05:31 2009
@@ -50,7 +50,7 @@
/// length byte array. Use {@link TermPositions#GetPayloadLength()} and
/// {@link TermPositions#GetPayload(byte[], int)} to retrieve the payloads from the index.
/// </summary>
- /// <summary><br><br>
+ /// <summary><br/><br/>
/// </summary>
/// <summary><p/><b>NOTE:</b> As of 2.9, Token implements all {@link Attribute} interfaces
/// that are part of core Lucene and can be found in the {@code tokenattributes} subpackage.
@@ -58,7 +58,7 @@
/// be used as convenience class that implements all {@link Attribute}s, which is especially useful
/// to easily switch from the old to the new TokenStream API.
/// </summary>
- /// <summary><br><br>
+ /// <summary><br/><br/>
/// <p/><b>NOTE:</b> As of 2.3, Token stores the term text
/// internally as a malleable char[] termBuffer instead of
/// String termText. The indexing code and core tokenizers
@@ -69,7 +69,7 @@
/// String for every term. The APIs that accept String
/// termText are still available but a warning about the
/// associated performance cost has been added (below). The
- /// {@link #TermText()} method has been deprecated.</p>
+ /// {@link #TermText()} method has been deprecated.<p/>
/// </summary>
/// <summary><p/>Tokenizers and TokenFilters should try to re-use a Token instance when
/// possible for best performance, by implementing the
@@ -86,7 +86,7 @@
/// or with {@link System#arraycopy(Object, int, Object, int, int)}, and finally call {@link #SetTermLength(int)} to
/// set the length of the term text. See <a target="_top"
/// href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
- /// for details.</p>
+ /// for details.<p/>
/// <p/>Typical Token reuse patterns:
/// <ul>
/// <li> Copying text from a string (type is reset to {@link #DEFAULT_TYPE} if not
@@ -101,7 +101,6 @@
/// return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
/// </pre>
/// </li>
- /// </li>
/// <li> Copying text from char[] buffer (type is reset to {@link #DEFAULT_TYPE}
/// if not specified):<br/>
/// <pre>
@@ -129,7 +128,7 @@
/// source text, so be careful in adjusting them.</li>
/// <li>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</li>
/// </ul>
- /// </p>
+ /// <p/>
/// </summary>
/// <seealso cref="Lucene.Net.Index.Payload">
/// </seealso>
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/TokenStream.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs Mon Dec 14 19:05:31 2009
@@ -38,9 +38,9 @@
/// <p/>
/// This is an abstract class. Concrete subclasses are:
/// <ul>
- /// <li>{@link Tokenizer}, a <code>TokenStream</code> whose input is a Reader; and
+ /// <li>{@link Tokenizer}, a <code>TokenStream</code> whose input is a Reader; and</li>
/// <li>{@link TokenFilter}, a <code>TokenStream</code> whose input is another
- /// <code>TokenStream</code>.
+ /// <code>TokenStream</code>.</li>
/// </ul>
/// A new <code>TokenStream</code> API has been introduced with Lucene 2.9. This API
/// has moved from being {@link Token} based to {@link Attribute} based. While
@@ -57,16 +57,16 @@
/// <b>The workflow of the new <code>TokenStream</code> API is as follows:</b>
/// <ol>
/// <li>Instantiation of <code>TokenStream</code>/{@link TokenFilter}s which add/get
- /// attributes to/from the {@link AttributeSource}.
- /// <li>The consumer calls {@link TokenStream#Reset()}.
+ /// attributes to/from the {@link AttributeSource}.</li>
+ /// <li>The consumer calls {@link TokenStream#Reset()}.</li>
/// <li>The consumer retrieves attributes from the stream and stores local
- /// references to all attributes it wants to access
+ /// references to all attributes it wants to access</li>
/// <li>The consumer calls {@link #IncrementToken()} until it returns false and
- /// consumes the attributes after each call.
+ /// consumes the attributes after each call.</li>
/// <li>The consumer calls {@link #End()} so that any end-of-stream operations
- /// can be performed.
+ /// can be performed.</li>
/// <li>The consumer calls {@link #Close()} to release any resource when finished
- /// using the <code>TokenStream</code>
+ /// using the <code>TokenStream</code></li>
/// </ol>
/// To make sure that filters and consumers know which attributes are available,
/// the attributes must be added during instantiation. Filters and consumers are
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs Mon Dec 14 19:05:31 2009
@@ -35,14 +35,14 @@
/// including either stem will match. In this case, all but the first stem's
/// increment should be set to zero: the increment of the first instance
/// should be one. Repeating a token with an increment of zero can also be
- /// used to boost the scores of matches on that token.
+ /// used to boost the scores of matches on that token.</li>
///
/// <li>Set it to values greater than one to inhibit exact phrase matches.
/// If, for example, one does not want phrases to match across removed stop
/// words, then one could build a stop word filter that removes stop words and
/// also sets the increment to the number of stop words removed before each
/// non-stop word. Then exact phrase queries will only match when the terms
- /// occur with no intervening stop words.
+ /// occur with no intervening stop words.</li>
///
/// </ul>
///
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs Mon Dec 14 19:05:31 2009
@@ -36,14 +36,14 @@
/// including either stem will match. In this case, all but the first stem's
/// increment should be set to zero: the increment of the first instance
/// should be one. Repeating a token with an increment of zero can also be
- /// used to boost the scores of matches on that token.
+ /// used to boost the scores of matches on that token.</li>
///
/// <li>Set it to values greater than one to inhibit exact phrase matches.
/// If, for example, one does not want phrases to match across removed stop
/// words, then one could build a stop word filter that removes stop words and
/// also sets the increment to the number of stop words removed before each
/// non-stop word. Then exact phrase queries will only match when the terms
- /// occur with no intervening stop words.
+ /// occur with no intervening stop words.</li>
///
/// </ul>
/// </summary>
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Document.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs Mon Dec 14 19:05:31 2009
@@ -140,12 +140,12 @@
/// <summary> <p/>Adds a field to a document. Several fields may be added with
/// the same name. In this case, if the fields are indexed, their text is
- /// treated as though appended for the purposes of search.</p>
+ /// treated as though appended for the purposes of search.<p/>
/// <p/> Note that add like the removeField(s) methods only makes sense
/// prior to adding a document to an index. These methods cannot
/// be used to change the content of an existing index! In order to achieve this,
/// a document has to be deleted from an index and a new changed version of that
- /// document has to be added.</p>
+ /// document has to be added.<p/>
/// </summary>
public void Add(Fieldable field)
{
@@ -154,12 +154,12 @@
/// <summary> <p/>Removes field with the specified name from the document.
/// If multiple fields exist with this name, this method removes the first field that has been added.
- /// If there is no field with the specified name, the document remains unchanged.</p>
+ /// If there is no field with the specified name, the document remains unchanged.<p/>
/// <p/> Note that the removeField(s) methods like the add method only make sense
/// prior to adding a document to an index. These methods cannot
/// be used to change the content of an existing index! In order to achieve this,
/// a document has to be deleted from an index and a new changed version of that
- /// document has to be added.</p>
+ /// document has to be added.<p/>
/// </summary>
public void RemoveField(System.String name)
{
@@ -176,12 +176,12 @@
}
/// <summary> <p/>Removes all fields with the given name from the document.
- /// If there is no field with the specified name, the document remains unchanged.</p>
+ /// If there is no field with the specified name, the document remains unchanged.<p/>
/// <p/> Note that the removeField(s) methods like the add method only make sense
/// prior to adding a document to an index. These methods cannot
/// be used to change the content of an existing index! In order to achieve this,
/// a document has to be deleted from an index and a new changed version of that
- /// document has to be added.</p>
+ /// document has to be added.<p/>
/// </summary>
public void RemoveFields(System.String name)
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Field.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs Mon Dec 14 19:05:31 2009
@@ -243,12 +243,12 @@
/// instance to improve indexing speed by avoiding GC cost
/// of new'ing and reclaiming Field instances. Typically
/// a single {@link Document} instance is re-used as
- /// well. This helps most on small documents.</p>
+ /// well. This helps most on small documents.<p/>
///
/// <p/>Each Field instance should only be used once
/// within a single {@link Document} instance. See <a
/// href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed">ImproveIndexingSpeed</a>
- /// for details.</p>
+ /// for details.<p/>
/// </summary>
public void SetValue(System.String value_Renamed)
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Fieldable.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs Mon Dec 14 19:05:31 2009
@@ -29,7 +29,7 @@
/// This means new methods may be added from version to version. This change only affects the Fieldable API; other backwards
/// compatibility promises remain intact. For example, Lucene can still
/// read and write indices created within the same major version.
- /// </p>
+ /// <p/>
///
///
/// </summary>
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/NumericField.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs Mon Dec 14 19:05:31 2009
@@ -64,7 +64,7 @@
/// value, either by dividing the result of
/// {@link java.util.Date#getTime} or using the separate getters
/// (for year, month, etc.) to construct an <code>int</code> or
- /// <code>long</code> value.</p>
+ /// <code>long</code> value.<p/>
///
/// <p/>To perform range querying or filtering against a
/// <code>NumericField</code>, use {@link NumericRangeQuery} or {@link
@@ -72,25 +72,25 @@
/// <code>NumericField</code>, use the normal numeric sort types, eg
/// {@link SortField#INT} (note that {@link SortField#AUTO}
/// will not work with these fields). <code>NumericField</code> values
- /// can also be loaded directly from {@link FieldCache}.</p>
+ /// can also be loaded directly from {@link FieldCache}.<p/>
///
/// <p/>By default, a <code>NumericField</code>'s value is not stored but
/// is indexed for range filtering and sorting. You can use
/// the {@link #NumericField(String,Field.Store,boolean)}
- /// constructor if you need to change these defaults.</p>
+ /// constructor if you need to change these defaults.<p/>
///
/// <p/>You may add the same field name as a <code>NumericField</code> to
/// the same document more than once. Range querying and
/// filtering will be the logical OR of all values; so a range query
/// will hit all documents that have at least one value in
/// the range. However sort behavior is not defined. If you need to sort,
- /// you should separately index a single-valued <code>NumericField</code>.</p>
+ /// you should separately index a single-valued <code>NumericField</code>.<p/>
///
/// <p/>A <code>NumericField</code> will consume somewhat more disk space
/// in the index than an ordinary single-valued field.
/// However, for a typical index that includes substantial
/// textual content per document, this increase will likely
- /// be in the noise. </p>
+ /// be in the noise. <p/>
///
/// <p/>Within Lucene, each numeric value is indexed as a
/// <em>trie</em> structure, where each term is logically
@@ -122,12 +122,12 @@
/// <p/>If you only need to sort by numeric value, and never
/// run range querying/filtering, you can index using a
/// <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- /// This will minimize disk space consumed. </p>
+ /// This will minimize disk space consumed. <p/>
///
/// <p/>More advanced users can instead use {@link
/// NumericTokenStream} directly, when indexing numbers. This
/// class is a wrapper around this token stream type for
- /// easier, more intuitive usage.</p>
+ /// easier, more intuitive usage.<p/>
///
/// <p/><b>NOTE:</b> This class is only used during
/// indexing. When retrieving the stored field value from a
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CompoundFileWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/CompoundFileWriter.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CompoundFileWriter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CompoundFileWriter.cs Mon Dec 14 19:05:31 2009
@@ -26,7 +26,7 @@
/// <summary> Combines multiple files into a single compound file.
- /// The file format:<br>
+ /// The file format:<br/>
/// <ul>
/// <li>VInt fileCount</li>
/// <li>{Directory}
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/DirectoryReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs Mon Dec 14 19:05:31 2009
@@ -1086,7 +1086,7 @@
/// <summary> Expert: return the IndexCommit that this reader has opened.
/// <p/>
- /// <p/><b>WARNING</b>: this API is new and experimental and may suddenly change.</p>
+ /// <p/><b>WARNING</b>: this API is new and experimental and may suddenly change.<p/>
/// </summary>
public override IndexCommit GetIndexCommit()
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/FieldInvertState.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs Mon Dec 14 19:05:31 2009
@@ -27,7 +27,7 @@
/// also used to calculate the normalization factor for a field.
///
/// <p/><b>WARNING</b>: This API is new and experimental, and may suddenly
- /// change.</p>
+ /// change.<p/>
/// </summary>
public sealed class FieldInvertState
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/FilterIndexReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs Mon Dec 14 19:05:31 2009
@@ -143,8 +143,8 @@
/// <summary> <p/>Construct a FilterIndexReader based on the specified base reader.
/// Directory locking for delete, undeleteAll, and setNorm operations is
- /// left to the base reader.</p>
- /// <p/>Note that base reader is closed if this FilterIndexReader is closed.</p>
+ /// left to the base reader.<p/>
+ /// <p/>Note that base reader is closed if this FilterIndexReader is closed.<p/>
/// </summary>
/// <param name="in">specified base reader.
/// </param>
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexCommit.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs Mon Dec 14 19:05:31 2009
@@ -23,21 +23,21 @@
{
/// <summary> <p/>Expert: represents a single commit into an index as seen by the
- /// {@link IndexDeletionPolicy} or {@link IndexReader}.</p>
+ /// {@link IndexDeletionPolicy} or {@link IndexReader}.<p/>
///
/// <p/> Changes to the content of an index are made visible
/// only after the writer who made that change commits by
/// writing a new segments file
/// (<code>segments_N</code>). This point in time, when the
/// action of writing of a new segments file to the directory
- /// is completed, is an index commit.</p>
+ /// is completed, is an index commit.<p/>
///
/// <p/>Each index commit point has a unique segments file
/// associated with it. The segments file associated with a
- /// later index commit point would have a larger N.</p>
+ /// later index commit point would have a larger N.<p/>
///
/// <p/><b>WARNING</b>: This API is new and experimental and
- /// may suddenly change. </p>
+ /// may suddenly change. <p/>
/// </summary>
public abstract class IndexCommit : IndexCommitPoint
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexDeletionPolicy.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs Mon Dec 14 19:05:31 2009
@@ -29,7 +29,7 @@
/// are deleted from the index directory. The default deletion policy
/// is {@link KeepOnlyLastCommitDeletionPolicy}, which always
/// removes old commits as soon as a new commit is done (this
- /// matches the behavior before 2.2).</p>
+ /// matches the behavior before 2.2).<p/>
///
/// <p/>One expected use case for this (and the reason why it
/// was first created) is to work around problems with an
@@ -44,7 +44,7 @@
/// increase the storage requirements of the index. See <a
/// target="top"
/// href="http://issues.apache.org/jira/browse/LUCENE-710">LUCENE-710</a>
- /// for details.</p>
+ /// for details.<p/>
/// </summary>
public interface IndexDeletionPolicy
@@ -52,13 +52,13 @@
/// <summary> <p/>This is called once when a writer is first
/// instantiated to give the policy a chance to remove old
- /// commit points.</p>
+ /// commit points.<p/>
///
/// <p/>The writer locates all index commits present in the
/// index directory and calls this method. The policy may
/// choose to delete some of the commit points, doing so by
/// calling method {@link IndexCommit#delete delete()}
- /// of {@link IndexCommit}.</p>
+ /// of {@link IndexCommit}.<p/>
///
/// <p/><u>Note:</u> the last CommitPoint is the most recent one,
/// i.e. the "front index state". Be careful not to delete it,
@@ -74,11 +74,11 @@
/// <summary> <p/>This is called each time the writer completed a commit.
/// This gives the policy a chance to remove old commit points
- /// with each commit.</p>
+ /// with each commit.<p/>
///
/// <p/>The policy may now choose to delete old commit points
/// by calling method {@link IndexCommit#delete delete()}
- /// of {@link IndexCommit}.</p>
+ /// of {@link IndexCommit}.<p/>
///
/// <p/>If writer has <code>autoCommit = true</code> then
/// this method will in general be called many times during
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexModifier.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs Mon Dec 14 19:05:31 2009
@@ -29,7 +29,7 @@
/// <summary> <p/>[Note that as of <b>2.1</b>, all but one of the
/// methods in this class are available via {@link
/// IndexWriter}. The one method that is not available is
- /// {@link #DeleteDocument(int)}.]</p>
+ /// {@link #DeleteDocument(int)}.]<p/>
///
/// A class to modify an index, i.e. to delete and add documents. This
/// class hides {@link IndexReader} and {@link IndexWriter} so that you
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs Mon Dec 14 19:05:31 2009
@@ -44,7 +44,7 @@
/// instead always throw UnsupportedOperationException. Subclasses are
/// strongly encouraged to override these methods, but in many cases may not
/// need to.
- /// </p>
+ /// <p/>
/// <p/>
/// <b>NOTE</b>: as of 2.4, it's possible to open a read-only
/// IndexReader using one of the static open methods that
@@ -55,7 +55,7 @@
/// read/write IndexReader. But in 3.0 this default will
/// change to true, meaning you must explicitly specify false
/// if you want to make changes with the resulting IndexReader.
- /// </p>
+ /// <p/>
/// <a name="thread-safety"></a><p/><b>NOTE</b>: {@link
/// <code>IndexReader</code>} instances are completely thread
/// safe, meaning multiple threads can call any of its methods,
@@ -589,7 +589,7 @@
/// <p/>
/// If the index has not changed since this instance was (re)opened, then this
/// call is a NOOP and returns this instance. Otherwise, a new instance is
- /// returned. The old instance is <b>not</b> closed and remains usable.<br>
+ /// returned. The old instance is <b>not</b> closed and remains usable.<br/>
/// <p/>
/// If the reader is reopened, even though they share
/// resources internally, it's safe to make changes
@@ -860,7 +860,7 @@
/// this method returns the version recorded in the commit that the reader
/// opened. This version is advanced every time {@link IndexWriter#Commit} is
/// called.
- /// </p>
+ /// <p/>
///
/// <p/>
/// If instead this reader is a near real-time reader (ie, obtained by a call
@@ -870,7 +870,7 @@
/// with the writer, the version will not change until a commit is
/// completed. Thus, you should not rely on this method to determine when a
/// near real-time reader should be opened. Use {@link #IsCurrent} instead.
- /// </p>
+ /// <p/>
///
/// </summary>
/// <throws> UnsupportedOperationException </throws>
@@ -904,7 +904,7 @@
/// N*termIndexInterval terms in the index is loaded into
/// memory. By setting this to a value > 1 you can reduce
/// memory usage, at the expense of higher latency when
- /// loading a TermInfo. The default value is 1.</p>
+ /// loading a TermInfo. The default value is 1.<p/>
///
/// <b>NOTE:</b> you must call this before the term
/// index is loaded. If the index is already loaded,
@@ -937,7 +937,7 @@
/// {@link #open}, or {@link #reopen} on a reader based on a Directory), then
/// this method checks if any further commits (see {@link IndexWriter#commit})
/// have occurred in that directory.
- /// </p>
+ /// <p/>
///
/// <p/>
/// If instead this reader is a near real-time reader (ie, obtained by a call
@@ -946,12 +946,12 @@
/// occurred, or any new uncommitted changes have taken place via the writer.
/// Note that even if the writer has only performed merging, this method will
/// still return false.
- /// </p>
+ /// <p/>
///
/// <p/>
/// In any event, if this returns false, you should call {@link #reopen} to
/// get a new reader that sees the changes.
- /// </p>
+ /// <p/>
///
/// </summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
@@ -1626,7 +1626,7 @@
/// segments_N file.
///
/// <p/><b>WARNING</b>: this API is new and experimental and
- /// may suddenly change.</p>
+ /// may suddenly change.<p/>
/// </summary>
public virtual IndexCommit GetIndexCommit()
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexWriter.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs Mon Dec 14 19:05:31 2009
@@ -44,13 +44,13 @@
/// also {@link #IndexWriter(Directory, Analyzer) constructors}
/// with no <code>create</code> argument which will create a new index
/// if there is not already an index at the provided path and otherwise
- /// open the existing index.</p>
+ /// open the existing index.<p/>
/// <p/>In either case, documents are added with {@link #AddDocument(Document)
/// addDocument} and removed with {@link #DeleteDocuments(Term)} or {@link
/// #DeleteDocuments(Query)}. A document can be updated with {@link
/// #UpdateDocument(Term, Document) updateDocument} (which just deletes
/// and then adds the entire document). When finished adding, deleting
- /// and updating documents, {@link #Close() close} should be called.</p>
+ /// and updating documents, {@link #Close() close} should be called.<p/>
/// <a name="flush"></a>
/// <p/>These changes are buffered in memory and periodically
/// flushed to the {@link Directory} (during the above method
@@ -69,7 +69,7 @@
/// also trigger one or more segment merges which by default
/// run with a background thread so as not to block the
/// addDocument calls (see <a href="#mergePolicy">below</a>
- /// for changing the {@link MergeScheduler}).</p>
+ /// for changing the {@link MergeScheduler}).<p/>
/// <a name="autoCommit"></a>
/// <p/>The optional <code>autoCommit</code> argument to the {@link
/// #IndexWriter(Directory, boolean, Analyzer) constructors}
@@ -96,7 +96,7 @@
/// followed by {@link #Commit()}. This is necessary when
/// Lucene is working with an external resource (for example,
/// a database) and both must either commit or rollback the
- /// transaction.</p>
+ /// transaction.<p/>
/// <p/>When <code>autoCommit</code> is <code>true</code> then
/// the writer will periodically commit on its own. [<b>Deprecated</b>: Note that in 3.0, IndexWriter will
/// no longer accept autoCommit=true (it will be hardwired to
@@ -110,22 +110,22 @@
/// see the changes to the index as of that commit. When
/// running in this mode, be careful not to refresh your
/// readers while optimize or segment merges are taking place
- /// as this can tie up substantial disk space.</p>
+ /// as this can tie up substantial disk space.<p/>
/// </summary>
/// <summary><p/>Regardless of <code>autoCommit</code>, an {@link
/// IndexReader} or {@link Lucene.Net.Search.IndexSearcher} will only see the
/// index as of the "point in time" that it was opened. Any
/// changes committed to the index after the reader was opened
- /// are not visible until the reader is re-opened.</p>
+ /// are not visible until the reader is re-opened.<p/>
/// <p/>If an index will not have more documents added for a while and optimal search
/// performance is desired, then either the full {@link #Optimize() optimize}
/// method or partial {@link #Optimize(int)} method should be
- /// called before the index is closed.</p>
+ /// called before the index is closed.<p/>
/// <p/>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
/// another <code>IndexWriter</code> on the same directory will lead to a
/// {@link LockObtainFailedException}. The {@link LockObtainFailedException}
/// is also thrown if an IndexReader on the same directory is used to delete documents
- /// from the index.</p>
+ /// from the index.<p/>
/// </summary>
/// <summary><a name="deletionPolicy"></a>
/// <p/>Expert: <code>IndexWriter</code> allows an optional
@@ -141,7 +141,7 @@
/// deleted out from under them. This is necessary on
/// filesystems like NFS that do not support "delete on last
/// close" semantics, which Lucene's "point in time" search
- /// normally relies on. </p>
+ /// normally relies on. <p/>
/// <a name="mergePolicy"></a> <p/>Expert:
/// <code>IndexWriter</code> allows you to separately change
/// the {@link MergePolicy} and the {@link MergeScheduler}.
@@ -153,7 +153,7 @@
/// {@link LogByteSizeMergePolicy}. Then, the {@link
/// MergeScheduler} is invoked with the requested merges and
/// it decides when and how to run the merges. The default is
- /// {@link ConcurrentMergeScheduler}. </p>
+ /// {@link ConcurrentMergeScheduler}. <p/>
/// <a name="OOME"></a><p/><b>NOTE</b>: if you hit an
/// OutOfMemoryError then IndexWriter will quietly record this
/// fact and block all future segment commits. This is a
@@ -165,7 +165,7 @@
/// #Rollback()}, to undo any changes to the index since the
/// last commit. If you opened the writer with autoCommit
/// false you can also just call {@link #Rollback()}
- /// directly.</p>
+ /// directly.<p/>
/// <a name="thread-safety"></a><p/><b>NOTE</b>: {@link
/// <code>IndexWriter</code>} instances are completely thread
/// safe, meaning multiple threads can call any of its
@@ -173,7 +173,7 @@
/// external synchronization, you should <b>not</b>
/// synchronize on the <code>IndexWriter</code> instance as
/// this may cause deadlock; use your own (non-Lucene) objects
- /// instead. </p>
+ /// instead. <p/>
/// </summary>
/*
@@ -372,36 +372,36 @@
/// experiment in your situation to determine if it's
/// faster enough. As this is a new and experimental
/// feature, please report back on your findings so we can
- /// learn, improve and iterate.</p>
+ /// learn, improve and iterate.<p/>
///
/// <p/>The resulting reader supports {@link
/// IndexReader#reopen}, but that call will simply forward
/// back to this method (though this may change in the
- /// future).</p>
+ /// future).<p/>
///
/// <p/>The very first time this method is called, this
/// writer instance will make every effort to pool the
/// readers that it opens for doing merges, applying
/// deletes, etc. This means additional resources (RAM,
- /// file descriptors, CPU time) will be consumed.</p>
+ /// file descriptors, CPU time) will be consumed.<p/>
///
/// <p/>For lower latency on reopening a reader, you may
/// want to call {@link #setMergedSegmentWarmer} to
/// pre-warm a newly merged segment before it's committed
- /// to the index.</p>
+ /// to the index.<p/>
///
/// <p/>If an addIndexes* call is running in another thread,
/// then this reader will only search those segments from
/// the foreign index that have been successfully copied
- /// over, so far</p>.
+ /// over, so far.<p/>
///
/// <p/><b>NOTE</b>: Once the writer is closed, any
/// outstanding readers may continue to be used. However,
/// if you attempt to reopen any of those readers, you'll
- /// hit an {@link AlreadyClosedException}.</p>
+ /// hit an {@link AlreadyClosedException}.<p/>
///
/// <p/><b>NOTE:</b> This API is experimental and might
- /// change in incompatible ways in the next release.</p>
+ /// change in incompatible ways in the next release.<p/>
///
/// </summary>
/// <returns> IndexReader that covers entire index plus all
@@ -962,12 +962,12 @@
/// this just returns the value previously set with
/// setUseCompoundFile(boolean), or the default value
/// (true). You cannot use this to query the status of
- /// previously flushed segments.</p>
+ /// previously flushed segments.<p/>
///
/// <p/>Note that this method is a convenience method: it
/// just calls mergePolicy.getUseCompoundFile as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.</p>
+ /// Otherwise an IllegalArgumentException is thrown.<p/>
///
/// </summary>
/// <seealso cref="SetUseCompoundFile(boolean)">
@@ -979,12 +979,12 @@
/// <summary><p/>Setting to turn on usage of a compound file. When on,
/// multiple files for each segment are merged into a
- /// single file when a new segment is flushed.</p>
+ /// single file when a new segment is flushed.<p/>
///
/// <p/>Note that this method is a convenience method: it
/// just calls mergePolicy.setUseCompoundFile as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.</p>
+ /// Otherwise an IllegalArgumentException is thrown.<p/>
/// </summary>
public virtual void SetUseCompoundFile(bool value_Renamed)
{
@@ -2001,19 +2001,19 @@
/// interactive indexing, as this limits the length of
/// pauses while indexing to a few seconds. Larger values
/// are best for batched indexing and speedier
- /// searches.</p>
+ /// searches.<p/>
///
- /// <p/>The default value is {@link Integer#MAX_VALUE}.</p>
+ /// <p/>The default value is {@link Integer#MAX_VALUE}.<p/>
///
/// <p/>Note that this method is a convenience method: it
/// just calls mergePolicy.setMaxMergeDocs as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.</p>
+ /// Otherwise an IllegalArgumentException is thrown.<p/>
///
/// <p/>The default merge policy ({@link
/// LogByteSizeMergePolicy}) also allows you to set this
/// limit by net size (in MB) of the segment, using {@link
- /// LogByteSizeMergePolicy#setMaxMergeMB}.</p>
+ /// LogByteSizeMergePolicy#setMaxMergeMB}.<p/>
/// </summary>
public virtual void SetMaxMergeDocs(int maxMergeDocs)
{
@@ -2021,12 +2021,12 @@
}
/// <summary> <p/>Returns the largest segment (measured by document
- /// count) that may be merged with other segments.</p>
+ /// count) that may be merged with other segments.<p/>
///
/// <p/>Note that this method is a convenience method: it
/// just calls mergePolicy.getMaxMergeDocs as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.</p>
+ /// Otherwise an IllegalArgumentException is thrown.<p/>
///
/// </summary>
/// <seealso cref="setMaxMergeDocs">
@@ -2079,9 +2079,9 @@
/// #DISABLE_AUTO_FLUSH} to prevent triggering a flush due
/// to number of buffered documents. Note that if flushing
/// by RAM usage is also enabled, then the flush will be
- /// triggered by whichever comes first.</p>
+ /// triggered by whichever comes first.<p/>
///
- /// <p/>Disabled by default (writer flushes by RAM usage).</p>
+ /// <p/>Disabled by default (writer flushes by RAM usage).<p/>
///
/// </summary>
/// <throws> IllegalArgumentException if maxBufferedDocs is </throws>
@@ -2149,7 +2149,7 @@
/// Pass in {@link #DISABLE_AUTO_FLUSH} to prevent
/// triggering a flush due to RAM usage. Note that if
/// flushing by document count is also enabled, then the
- /// flush will be triggered by whichever comes first.</p>
+ /// flush will be triggered by whichever comes first.<p/>
///
/// <p/> <b>NOTE</b>: the account of RAM usage for pending
/// deletions is only approximate. Specifically, if you
@@ -2167,9 +2167,9 @@
/// less than 2048 MB. The precise limit depends on various factors, such as
/// how large your documents are, how many fields have norms, etc., so it's
/// best to set this value comfortably under 2048.
- /// </p>
+ /// <p/>
///
- /// <p/> The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.</p>
+ /// <p/> The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.<p/>
///
/// </summary>
/// <throws> IllegalArgumentException if ramBufferSize is </throws>
@@ -2200,8 +2200,8 @@
/// <summary> <p/>Determines the minimal number of delete terms required before the buffered
/// in-memory delete terms are applied and flushed. If there are documents
/// buffered in memory at the time, they are merged and a new segment is
- /// created.</p>
- /// <p/>Disabled by default (writer flushes by RAM usage).</p>
+ /// created.<p/>
+ /// <p/>Disabled by default (writer flushes by RAM usage).<p/>
///
/// </summary>
/// <throws> IllegalArgumentException if maxBufferedDeleteTerms </throws>
@@ -2241,7 +2241,7 @@
/// <p/>Note that this method is a convenience method: it
/// just calls mergePolicy.setMergeFactor as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.</p>
+ /// Otherwise an IllegalArgumentException is thrown.<p/>
///
/// <p/>This must never be less than 2. The default value is 10.
/// </summary>
@@ -2252,12 +2252,12 @@
/// <summary> <p/>Returns the number of segments that are merged at
/// once and also controls the total number of segments
- /// allowed to accumulate in the index.</p>
+ /// allowed to accumulate in the index.<p/>
///
/// <p/>Note that this method is a convenience method: it
/// just calls mergePolicy.getMergeFactor as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.</p>
+ /// Otherwise an IllegalArgumentException is thrown.<p/>
///
/// </summary>
/// <seealso cref="setMergeFactor">
@@ -2400,14 +2400,14 @@
/// be consistent. However, the close will not be complete
/// even though part of it (flushing buffered documents)
/// may have succeeded, so the write lock will still be
- /// held.</p>
+ /// held.<p/>
///
/// <p/> If you can correct the underlying cause (eg free up
/// some disk space) then you can call close() again.
/// Failing that, if you want to force the write lock to be
/// released (dangerous, because you may then lose buffered
/// docs in the IndexWriter instance) then you can do
- /// something like this:</p>
+ /// something like this:<p/>
///
/// <pre>
/// try {
@@ -2420,11 +2420,11 @@
/// </pre>
///
/// after which, you must be certain not to use the writer
- /// instance anymore.</p>
+ /// instance anymore.<p/>
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer, again. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
@@ -2441,14 +2441,14 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer, again. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// <p/><b>NOTE</b>: it is dangerous to always call
/// close(false), especially when IndexWriter is not open
/// for very long, because this can result in "merge
/// starvation" whereby long merges will never have a
/// chance to finish. This will cause too many segments in
- /// your index over time.</p>
+ /// your index over time.<p/>
///
/// </summary>
/// <param name="waitForMerges">if true, this call will block
@@ -2790,12 +2790,12 @@
/// may not have been added. Furthermore, it's possible
/// the index will have one segment in non-compound format
/// even when using compound files (when a merge has
- /// partially succeeded).</p>
+ /// partially succeeded).<p/>
///
/// <p/> This method periodically flushes pending documents
/// to the Directory (see <a href="#flush">above</a>), and
/// also periodically triggers segment merges in the index
- /// according to the {@link MergePolicy} in use.</p>
+ /// according to the {@link MergePolicy} in use.<p/>
///
/// <p/>Merges temporarily consume space in the
/// directory. The amount of space required is up to 1X the
@@ -2809,17 +2809,17 @@
///
/// <p/>Note that each term in the document can be no longer
/// than 16383 characters, otherwise an
- /// IllegalArgumentException will be thrown.</p>
+ /// IllegalArgumentException will be thrown.<p/>
///
/// <p/>Note that it's possible to create an invalid Unicode
/// string in java if a UTF16 surrogate pair is malformed.
/// In this case, the invalid characters are silently
/// replaced with the Unicode replacement character
- /// U+FFFD.</p>
+ /// U+FFFD.<p/>
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
@@ -2836,11 +2836,11 @@
///
/// <p/>See {@link #AddDocument(Document)} for details on
/// index and IndexWriter state after an Exception, and
- /// flushing/merging temporary free space requirements.</p>
+ /// flushing/merging temporary free space requirements.<p/>
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
@@ -2891,7 +2891,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <param name="term">the term to identify the documents to be deleted
@@ -2918,7 +2918,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <param name="terms">array of terms to identify the documents
@@ -2945,7 +2945,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <param name="query">the query to identify the documents to be deleted
@@ -2965,7 +2965,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <param name="queries">array of queries to identify the documents
@@ -2989,7 +2989,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <param name="term">the term to identify the document(s) to be
@@ -3013,7 +3013,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <param name="term">the term to identify the document(s) to be
@@ -3145,28 +3145,28 @@
/// <p/>It is recommended that this method be called upon completion of indexing. In
/// environments with frequent updates, optimize is best done during low volume times, if at all.
///
- /// </p>
- /// <p/>See http://www.gossamer-threads.com/lists/lucene/java-dev/47895 for more discussion. </p>
+ /// <p/>
+ /// <p/>See http://www.gossamer-threads.com/lists/lucene/java-dev/47895 for more discussion. <p/>
///
/// <p/>Note that optimize requires 2X the index size free
/// space in your Directory. For example, if your index
/// size is 10 MB then you need 20 MB free for optimize to
- /// complete.</p>
+ /// complete.<p/>
///
/// <p/>If some but not all readers re-open while an
/// optimize is underway, this will cause > 2X temporary
/// space to be consumed as those new readers will then
/// hold open the partially optimized segments at that
/// time. It is best not to re-open readers while optimize
- /// is running.</p>
+ /// is running.<p/>
///
/// <p/>The actual temporary usage could be much less than
- /// these figures (it depends on many factors).</p>
+ /// these figures (it depends on many factors).<p/>
///
/// <p/>In general, once the optimize completes, the total size of the
/// index will be less than the size of the starting index.
/// It could be quite a bit smaller (if there were many
- /// pending deletes) or just slightly smaller.</p>
+ /// pending deletes) or just slightly smaller.<p/>
///
/// <p/>If an Exception is hit during optimize(), for example
/// due to disk full, the index will not be corrupt and no
@@ -3176,17 +3176,17 @@
/// the index will be in non-compound format even when
/// using compound file format. This will occur when the
/// Exception is hit during conversion of the segment into
- /// compound format.</p>
+ /// compound format.<p/>
///
/// <p/>This call will optimize those segments present in
/// the index when the call started. If other threads are
/// still adding documents and flushing segments, those
/// newly created segments will not be optimized unless you
- /// call optimize again.</p>
+ /// call optimize again.<p/>
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
@@ -3204,7 +3204,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <param name="maxNumSegments">maximum number of segments left
@@ -3223,7 +3223,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
/// </summary>
public virtual void Optimize(bool doWait)
{
@@ -3238,7 +3238,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
/// </summary>
public virtual void Optimize(int maxNumSegments, bool doWait)
{
@@ -3368,7 +3368,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
/// </summary>
public virtual void ExpungeDeletes(bool doWait)
{
@@ -3455,7 +3455,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
/// </summary>
public virtual void ExpungeDeletes()
{
@@ -3473,7 +3473,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
/// </summary>
public void MaybeMerge()
{
@@ -3905,9 +3905,9 @@
/// <p/>This method will drop all buffered documents and will
/// remove all segments from the index. This change will not be
/// visible until a {@link #Commit()} has been called. This method
- /// can be rolled back using {@link #Rollback()}.</p>
+ /// can be rolled back using {@link #Rollback()}.<p/>
///
- /// <p/>NOTE: this method is much faster than using deleteDocuments( new MatchAllDocsQuery() ).</p>
+ /// <p/>NOTE: this method is much faster than using deleteDocuments( new MatchAllDocsQuery() ).<p/>
///
/// <p/>NOTE: this method will forcefully abort all merges
/// in progress. If other threads are running {@link
@@ -4028,7 +4028,7 @@
/// <summary> Wait for any currently outstanding merges to finish.
///
/// <p/>It is guaranteed that any merges started prior to calling this method
- /// will have completed once this method completes.</p>
+ /// will have completed once this method completes.<p/>
/// </summary>
public virtual void WaitForMerges()
{
@@ -4097,7 +4097,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <deprecated> Use {@link #addIndexesNoOptimize} instead,
@@ -4229,7 +4229,7 @@
/// handled: it does not commit a new segments_N file until
/// all indexes are added. This means if an Exception
/// occurs (for example disk full), then either no indexes
- /// will have been added or they all will have been.</p>
+ /// will have been added or they all will have been.<p/>
///
/// <p/>Note that this requires temporary free space in the
/// Directory up to 2X the sum of all input indexes
@@ -4237,20 +4237,20 @@
/// are open against the starting index, then temporary
/// free space required will be higher by the size of the
/// starting index (see {@link #Optimize()} for details).
- /// </p>
+ /// <p/>
///
/// <p/>Once this completes, the final size of the index
/// will be less than the sum of all input index sizes
/// (including the starting index). It could be quite a
/// bit smaller (if there were many pending deletes) or
- /// just slightly smaller.</p>
+ /// just slightly smaller.<p/>
///
/// <p/>
/// This requires this index not be among those to be added.
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
@@ -4434,8 +4434,8 @@
}
/// <summary>Merges the provided indexes into this index.
- /// <p/>After this completes, the index is optimized. </p>
- /// <p/>The provided IndexReaders are not closed.</p>
+ /// <p/>After this completes, the index is optimized. <p/>
+ /// <p/>The provided IndexReaders are not closed.<p/>
///
/// <p/><b>NOTE:</b> while this is running, any attempts to
/// add or delete documents (with another thread) will be
@@ -4444,11 +4444,11 @@
/// <p/>See {@link #AddIndexesNoOptimize(Directory[])} for
/// details on transactional semantics, temporary free
/// space required in the Directory, and non-CFS segments
- /// on an Exception.</p>
+ /// on an Exception.<p/>
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
@@ -4644,7 +4644,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <deprecated> please call {@link #Commit()}) instead
@@ -4667,7 +4667,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <seealso cref="PrepareCommit(Map)">
@@ -4688,7 +4688,7 @@
/// After calling this you must call either {@link
/// #Commit()} to finish the commit, or {@link
/// #Rollback()} to revert the commit and undo all changes
- /// done since the writer was opened.</p>
+ /// done since the writer was opened.<p/>
///
/// You can also just call {@link #Commit(Map)} directly
/// without prepareCommit first in which case that method
@@ -4696,7 +4696,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <param name="commitUserData">Opaque Map (String->String)
@@ -4753,7 +4753,7 @@
/// crash or power loss. Note that this does not wait for
/// any running background merges to finish. This may be a
/// costly operation, so you should test the cost in your
- /// application and do it only when really necessary.</p>
+ /// application and do it only when really necessary.<p/>
///
/// <p/> Note that this operation calls Directory.sync on
/// the index files. That call should not return until the
@@ -4765,11 +4765,11 @@
/// performance. If you have such a device, and it does
/// not have a battery backup (for example) then on power
/// loss it may still lose data. Lucene cannot guarantee
- /// consistency on such devices. </p>
+ /// consistency on such devices. <p/>
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
///
/// </summary>
/// <seealso cref="prepareCommit">
@@ -4788,7 +4788,7 @@
///
/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See <a
- /// href="#OOME">above</a> for details.</p>
+ /// href="#OOME">above</a> for details.<p/>
/// </summary>
public void Commit(System.Collections.Generic.IDictionary<string, string> commitUserData)
{
@@ -6618,7 +6618,7 @@
/// new near real-time reader after a merge completes.
///
/// <p/><b>NOTE:</b> This API is experimental and might
- /// change in incompatible ways in the next release.</p>
+ /// change in incompatible ways in the next release.<p/>
///
/// <p/><b>NOTE</b>: warm is called before any deletes have
/// been carried over to the merged segment.
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs Mon Dec 14 19:05:31 2009
@@ -53,11 +53,11 @@
/// than 50 MB) are best for interactive indexing, as this
/// limits the length of pauses while indexing to a few
/// seconds. Larger values are best for batched indexing
- /// and speedier searches.</p>
+ /// and speedier searches.<p/>
///
/// <p/>Note that {@link #setMaxMergeDocs} is also
/// used to check whether a segment is too large for
- /// merging (it's either or).</p>
+ /// merging (it's either or).<p/>
/// </summary>
public virtual void SetMaxMergeMB(double mb)
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/LogMergePolicy.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs Mon Dec 14 19:05:31 2009
@@ -27,7 +27,7 @@
/// (beyond the merge factor upper bound) are encountered,
/// all segments within the level are merged. You can get or
/// set the merge factor using {@link #GetMergeFactor()} and
- /// {@link #SetMergeFactor(int)} respectively.</p>
+ /// {@link #SetMergeFactor(int)} respectively.<p/>
///
/// <p/>This class is abstract and requires a subclass to
/// define the {@link #size} method which specifies how a
@@ -35,7 +35,7 @@
/// is one subclass that measures size by document count in
/// the segment. {@link LogByteSizeMergePolicy} is another
/// subclass that measures size as the total byte size of the
- /// file(s) for the segment.</p>
+ /// file(s) for the segment.<p/>
/// </summary>
public abstract class LogMergePolicy:MergePolicy
@@ -87,7 +87,7 @@
/// <summary><p/>Returns the number of segments that are merged at
/// once and also controls the total number of segments
- /// allowed to accumulate in the index.</p>
+ /// allowed to accumulate in the index.<p/>
/// </summary>
public virtual int GetMergeFactor()
{
@@ -520,14 +520,14 @@
/// interactive indexing, as this limits the length of
/// pauses while indexing to a few seconds. Larger values
/// are best for batched indexing and speedier
- /// searches.</p>
+ /// searches.<p/>
///
- /// <p/>The default value is {@link Integer#MAX_VALUE}.</p>
+ /// <p/>The default value is {@link Integer#MAX_VALUE}.<p/>
///
/// <p/>The default merge policy ({@link
/// LogByteSizeMergePolicy}) also allows you to set this
/// limit by net size (in MB) of the segment, using {@link
- /// LogByteSizeMergePolicy#setMaxMergeMB}.</p>
+ /// LogByteSizeMergePolicy#setMaxMergeMB}.<p/>
/// </summary>
public virtual void SetMaxMergeDocs(int maxMergeDocs)
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MergePolicy.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs Mon Dec 14 19:05:31 2009
@@ -24,7 +24,7 @@
/// <summary> <p/>Expert: a MergePolicy determines the sequence of
/// primitive merge operations to be used for overall merge
- /// and optimize operations.</p>
+ /// and optimize operations.<p/>
///
/// <p/>Whenever the segments in an index have been altered by
/// {@link IndexWriter}, either the addition of a newly
@@ -37,19 +37,19 @@
/// merges that should be done, or null if no merges are
/// necessary. When IndexWriter.optimize is called, it calls
/// {@link #findMergesForOptimize} and the MergePolicy should
- /// then return the necessary merges.</p>
+ /// then return the necessary merges.<p/>
///
/// <p/>Note that the policy can return more than one merge at
/// a time. In this case, if the writer is using {@link
/// SerialMergeScheduler}, the merges will be run
/// sequentially but if it is using {@link
- /// ConcurrentMergeScheduler} they will be run concurrently.</p>
+ /// ConcurrentMergeScheduler} they will be run concurrently.<p/>
///
/// <p/>The default MergePolicy is {@link
- /// LogByteSizeMergePolicy}.</p>
+ /// LogByteSizeMergePolicy}.<p/>
///
/// <p/><b>NOTE:</b> This API is new and still experimental
- /// (subject to change suddenly in the next release)</p>
+ /// (subject to change suddenly in the next release)<p/>
///
/// <p/><b>NOTE</b>: This class typically requires access to
/// package-private APIs (e.g. <code>SegmentInfos</code>) to do its job;
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MergeScheduler.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs Mon Dec 14 19:05:31 2009
@@ -23,10 +23,10 @@
/// <summary><p/>Expert: {@link IndexWriter} uses an instance
/// implementing this interface to execute the merges
/// selected by a {@link MergePolicy}. The default
- /// MergeScheduler is {@link ConcurrentMergeScheduler}.</p>
+ /// MergeScheduler is {@link ConcurrentMergeScheduler}.<p/>
///
/// <p/><b>NOTE:</b> This API is new and still experimental
- /// (subject to change suddenly in the next release)</p>
+ /// (subject to change suddenly in the next release)<p/>
///
/// <p/><b>NOTE</b>: This class typically requires access to
/// package-private APIs (eg, SegmentInfos) to do its job;
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MultiReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs Mon Dec 14 19:05:31 2009
@@ -44,8 +44,8 @@
/// <summary> <p/>Construct a MultiReader aggregating the named set of (sub)readers.
/// Directory locking for delete, undeleteAll, and setNorm operations is
- /// left to the subreaders. </p>
- /// <p/>Note that all subreaders are closed if this Multireader is closed.</p>
+ /// left to the subreaders. <p/>
+ /// <p/>Note that all subreaders are closed if this Multireader is closed.<p/>
/// </summary>
/// <param name="subReaders">set of (sub)readers
/// </param>
@@ -57,7 +57,7 @@
/// <summary> <p/>Construct a MultiReader aggregating the named set of (sub)readers.
/// Directory locking for delete, undeleteAll, and setNorm operations is
- /// left to the subreaders. </p>
+ /// left to the subreaders. <p/>
/// </summary>
/// <param name="closeSubReaders">indicates whether the subreaders should be closed
/// when this MultiReader is closed
@@ -98,7 +98,7 @@
}
/// <summary> Tries to reopen the subreaders.
- /// <br>
+ /// <br/>
/// If one or more subreaders could be re-opened (i. e. subReader.reopen()
/// returned a new instance != subReader), then a new MultiReader instance
/// is returned, otherwise this instance is returned.
@@ -125,7 +125,7 @@
/// <summary> Clones the subreaders.
/// (see {@link IndexReader#clone()}).
- /// <br>
+ /// <br/>
/// <p/>
/// If subreaders are shared, then the reference count of those
/// readers is increased to ensure that the subreaders remain open
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/ParallelReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs Mon Dec 14 19:05:31 2009
@@ -57,7 +57,7 @@
private bool hasDeletions;
/// <summary>Construct a ParallelReader.
- /// <p/>Note that all subreaders are closed if this ParallelReader is closed.</p>
+ /// <p/>Note that all subreaders are closed if this ParallelReader is closed.<p/>
/// </summary>
public ParallelReader():this(true)
{
@@ -144,7 +144,7 @@
}
/// <summary> Tries to reopen the subreaders.
- /// <br>
+ /// <br/>
/// If one or more subreaders could be re-opened (i. e. subReader.reopen()
/// returned a new instance != subReader), then a new ParallelReader instance
/// is returned, otherwise this instance is returned.
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/Payload.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs Mon Dec 14 19:05:31 2009
@@ -31,7 +31,7 @@
/// produces payload data.
/// <p/>
/// Use {@link TermPositions#GetPayloadLength()} and {@link TermPositions#GetPayload(byte[], int)}
- /// to retrieve the payloads from the index.<br>
+ /// to retrieve the payloads from the index.<br/>
///
/// </summary>
[Serializable]
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/SegmentInfo.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs Mon Dec 14 19:05:31 2009
@@ -29,7 +29,7 @@
/// to the segment.
///
/// * <p/><b>NOTE:</b> This API is new and still experimental
- /// (subject to change suddenly in the next release)</p>
+ /// (subject to change suddenly in the next release)<p/>
/// </summary>
public sealed class SegmentInfo : System.ICloneable
{