Posted to commits@lucenenet.apache.org by di...@apache.org on 2009/12/14 15:13:08 UTC

svn commit: r890338 [1/4] - in /incubator/lucene.net/trunk/C#/src/Lucene.Net: Analysis/ Analysis/Standard/ Analysis/Tokenattributes/ Document/ Index/ QueryParser/ Search/ Search/Function/ Search/Payloads/ Search/Spans/ Store/ Util/

Author: digy
Date: Mon Dec 14 14:13:03 2009
New Revision: 890338

URL: http://svn.apache.org/viewvc?rev=890338&view=rev
Log:
The <p> tag creates problems with XML documentation (IntelliSense help). Replaced with <p/>
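
For context, a minimal sketch of why this matters (the class stub below is illustrative only and not part of the commit; the summary text is taken from the Analyzer.cs hunk further down): the C# compiler parses /// doc comments as XML, so an unclosed <p> carried over from the Java Javadoc leaves the comment ill-formed, the compiler warns, and the summary can be dropped from the generated XML documentation file that IntelliSense reads. The self-closing <p/> keeps the comment well-formed while still marking a paragraph break.

    // The ported Javadoc used an unclosed <p>, which is not well-formed XML:
    //
    //   /// <summary>An Analyzer builds TokenStreams, which analyze text.
    //   /// <p>
    //   /// Typical implementations first build a Tokenizer ...
    //   /// </summary>
    //
    // The compiler reports warning CS1570 ("XML comment has badly formed XML")
    // and the malformed summary may be omitted from the generated .xml file.

    // After this commit, the self-closing <p/> keeps the comment well-formed:
    /// <summary>An Analyzer builds TokenStreams, which analyze text.
    /// <p/>
    /// Typical implementations first build a Tokenizer, then apply TokenFilters.
    /// </summary>
    public abstract class ExampleAnalyzer { }

The hunks below apply this same mechanical substitution across the Analysis, Document, Index, QueryParser, Search, Store, and Util packages.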

Modified:
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Analyzer.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/BaseCharFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/CachingTokenFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/LowerCaseTokenizer.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/PorterStemFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/StopAnalyzer.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/StopFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TeeSinkTokenFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenizer.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/AbstractField.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/DateField.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/DateTools.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumberTools.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CheckIndex.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldReaderException.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommitPoint.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfos.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentMerger.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentReader.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SnapshotDeletionPolicy.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Term.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermDocs.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermEnum.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermPositions.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/QueryParser/FastCharStream.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/QueryParser/MultiFieldQueryParser.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/QueryParser/QueryParser.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/BooleanQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Collector.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ComplexExplanation.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ConstantScoreRangeQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DefaultSimilarity.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DisjunctionMaxQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DisjunctionSumScorer.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DocIdSetIterator.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Explanation.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCache.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCacheImpl.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCacheRangeFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldComparator.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldDoc.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldDocSortedHitQueue.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldSortedHitQueue.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldValueHitQueue.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Filter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FilteredQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FilteredTermEnum.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ByteFieldSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/CustomScoreQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/DocValues.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/FieldCacheSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/FieldScoreQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/FloatFieldSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/IntFieldSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/MultiValueSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/OrdFieldSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ReverseOrdFieldSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ShortFieldSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ValueSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ValueSourceQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FuzzyTermEnum.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/HitCollector.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/HitQueue.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Hits.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/IndexSearcher.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/MultiSearcher.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/MultiTermQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/NumericRangeFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/NumericRangeQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ParallelMultiSearcher.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Payloads/BoostingTermQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Payloads/PayloadTermQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/PhraseQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/PrefixQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/PrefixTermEnum.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Query.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/QueryWrapperFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/RangeFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/RangeQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ScoreDocComparator.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Scorer.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Searchable.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Searcher.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Similarity.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Sort.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/SortComparator.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/SortComparatorSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/SortField.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/NearSpansOrdered.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/Spans.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TermRangeFilter.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TermRangeQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TermRangeTermEnum.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TimeLimitedCollector.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopDocCollector.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopFieldCollector.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopFieldDocCollector.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopFieldDocs.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopScoreDocCollector.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Weight.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/WildcardQuery.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/WildcardTermEnum.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/Directory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FSDirectory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FileSwitchDirectory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/IndexInput.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/Lock.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/LockFactory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/MMapDirectory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/NIOFSDirectory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/NativeFSLockFactory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/RAMDirectory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/SimpleFSLockFactory.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/AttributeImpl.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/AttributeSource.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/FieldCacheSanityChecker.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/NumericUtils.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/OpenBitSet.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/PriorityQueue.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/SortedVIntList.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/UnicodeUtil.cs
    incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/Version.cs

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Analyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Analyzer.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Analyzer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Analyzer.cs Mon Dec 14 14:13:03 2009
@@ -26,7 +26,7 @@
 	
 	/// <summary>An Analyzer builds TokenStreams, which analyze text.  It thus represents a
 	/// policy for extracting index terms from text.
-	/// <p>
+	/// <p/>
 	/// Typical implementations first build a Tokenizer, which breaks the stream of
 	/// characters from the Reader into raw Tokens.  One or more TokenFilters may
 	/// then be applied to the output of the Tokenizer.

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/BaseCharFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/BaseCharFilter.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/BaseCharFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/BaseCharFilter.cs Mon Dec 14 14:13:03 2009
@@ -25,7 +25,7 @@
 	/// {@link #addOffCorrectMap}, and then invoke the correct
 	/// method to correct an offset.
 	/// 
-	/// <p><b>NOTE</b>: This class is not particularly efficient.
+	/// <p/><b>NOTE</b>: This class is not particularly efficient.
 	/// For example, a new class instance is created for every
 	/// call to {@link #addOffCorrectMap}, which is then appended
 	/// to a private list.

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/CachingTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/CachingTokenFilter.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/CachingTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/CachingTokenFilter.cs Mon Dec 14 14:13:03 2009
@@ -26,7 +26,7 @@
 	/// are intended to be consumed more than once. It caches
 	/// all token attribute states locally in a List.
 	/// 
-	/// <P>CachingTokenFilter implements the optional method
+	/// <p/>CachingTokenFilter implements the optional method
 	/// {@link TokenStream#Reset()}, which repositions the
 	/// stream to the first Token. 
 	/// </summary>

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs Mon Dec 14 14:13:03 2009
@@ -24,9 +24,9 @@
 	
 	/// <summary> A filter that replaces accented characters in the ISO Latin 1 character set 
 	/// (ISO-8859-1) by their unaccented equivalent. The case will not be altered.
-	/// <p>
+	/// <p/>
 	/// For instance, '&agrave;' will be replaced by 'a'.
-	/// <p>
+	/// <p/>
 	/// 
 	/// </summary>
 	/// <deprecated> in favor of {@link ASCIIFoldingFilter} which covers a superset 

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/LowerCaseTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/LowerCaseTokenizer.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/LowerCaseTokenizer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/LowerCaseTokenizer.cs Mon Dec 14 14:13:03 2009
@@ -27,7 +27,7 @@
 	/// them to lower case.  While it is functionally equivalent to the combination
 	/// of LetterTokenizer and LowerCaseFilter, there is a performance advantage
 	/// to doing the two tasks at once, hence this (redundant) implementation.
-	/// <P>
+	/// <p/>
 	/// Note: this does a decent job for most European languages, but does a terrible
 	/// job for some Asian languages, where words are not separated by spaces.
 	/// </summary>

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/NumericTokenStream.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs Mon Dec 14 14:13:03 2009
@@ -37,16 +37,16 @@
 	/// for indexing numeric values that can be used by {@link
 	/// NumericRangeQuery} or {@link NumericRangeFilter}.
 	/// 
-	/// <p>Note that for simple usage, {@link NumericField} is
+	/// <p/>Note that for simple usage, {@link NumericField} is
 	/// recommended.  {@link NumericField} disables norms and
 	/// term freqs, as they are not usually needed during
 	/// searching.  If you need to change these settings, you
 	/// should use this class.
 	/// 
-	/// <p>See {@link NumericField} for capabilities of fields
+	/// <p/>See {@link NumericField} for capabilities of fields
 	/// indexed numerically.</p>
 	/// 
-	/// <p>Here's an example usage, for an <code>int</code> field:
+	/// <p/>Here's an example usage, for an <code>int</code> field:
 	/// 
 	/// <pre>
 	///  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value));
@@ -55,7 +55,7 @@
 	///  document.add(field);
 	/// </pre>
 	/// 
-	/// <p>For optimal performance, re-use the TokenStream and Field instance
+	/// <p/>For optimal performance, re-use the TokenStream and Field instance
 	/// for more than one document:
 	/// 
 	/// <pre>
@@ -72,21 +72,21 @@
 	///  }
 	/// </pre>
 	/// 
-	/// <p>This stream is not intended to be used in analyzers;
+	/// <p/>This stream is not intended to be used in analyzers;
 	/// it's more for iterating the different precisions during
 	/// indexing a specific numeric value.</p>
 	/// 
-	/// <p><b>NOTE</b>: as token streams are only consumed once
+	/// <p/><b>NOTE</b>: as token streams are only consumed once
 	/// the document is added to the index, if you index more
 	/// than one numeric field, use a separate <code>NumericTokenStream</code>
 	/// instance for each.</p>
 	/// 
-	/// <p>See {@link NumericRangeQuery} for more details on the
+	/// <p/>See {@link NumericRangeQuery} for more details on the
 	/// <a
 	/// href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
 	/// parameter as well as how numeric fields work under the hood.</p>
 	/// 
-	/// <p><font color="red"><b>NOTE:</b> This API is experimental and
+	/// <p/><font color="red"><b>NOTE:</b> This API is experimental and
 	/// might change in incompatible ways in the next release.</font>
 	/// 
 	/// </summary>

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs Mon Dec 14 14:13:03 2009
@@ -24,7 +24,7 @@
 	/// fields require different analysis techniques.  Use {@link #addAnalyzer}
 	/// to add a non-default analyzer on a field name basis.
 	/// 
-	/// <p>Example usage:
+	/// <p/>Example usage:
 	/// 
 	/// <pre>
 	/// PerFieldAnalyzerWrapper aWrapper =
@@ -33,10 +33,10 @@
 	/// aWrapper.addAnalyzer("lastname", new KeywordAnalyzer());
 	/// </pre>
 	/// 
-	/// <p>In this example, StandardAnalyzer will be used for all fields except "firstname"
+	/// <p/>In this example, StandardAnalyzer will be used for all fields except "firstname"
 	/// and "lastname", for which KeywordAnalyzer will be used.
 	/// 
-	/// <p>A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing
+	/// <p/>A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing
 	/// and query parsing.
 	/// </summary>
 	public class PerFieldAnalyzerWrapper:Analyzer

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/PorterStemFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/PorterStemFilter.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/PorterStemFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/PorterStemFilter.cs Mon Dec 14 14:13:03 2009
@@ -26,12 +26,12 @@
 	/// Note: the input to the stemming filter must already be in lower case,
 	/// so you will need to use LowerCaseFilter or LowerCaseTokenizer farther
 	/// down the Tokenizer chain in order for this to work properly!
-	/// <P>
+	/// <p/>
 	/// To use this filter with other analyzers, you'll want to write an
 	/// Analyzer class that sets up the TokenStream chain as you want it.
 	/// To use this with LowerCaseTokenizer, for example, you'd write an
 	/// analyzer like this:
-	/// <P>
+	/// <p/>
 	/// <PRE>
 	/// class MyAnalyzer extends Analyzer {
 	/// public final TokenStream tokenStream(String fieldName, Reader reader) {

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs Mon Dec 14 14:13:03 2009
@@ -28,7 +28,7 @@
 	/// words.
 	/// 
 	/// <a name="version"/>
-	/// <p>
+	/// <p/>
 	/// You must specify the required {@link Version} compatibility when creating
 	/// StandardAnalyzer:
 	/// <ul>

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Standard/StandardFilter.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardFilter.cs Mon Dec 14 14:13:03 2009
@@ -47,8 +47,8 @@
 		private TermAttribute termAtt;
 		
 		/// <summary>Returns the next token in the stream, or null at EOS.
-		/// <p>Removes <tt>'s</tt> from the end of words.
-		/// <p>Removes dots from acronyms.
+		/// <p/>Removes <tt>'s</tt> from the end of words.
+		/// <p/>Removes dots from acronyms.
 		/// </summary>
 		public override bool IncrementToken()
 		{

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs Mon Dec 14 14:13:03 2009
@@ -32,7 +32,7 @@
 	
 	/// <summary>A grammar-based tokenizer constructed with JFlex
 	/// 
-	/// <p> This should be a good tokenizer for most European-language documents:
+	/// <p/> This should be a good tokenizer for most European-language documents:
 	/// 
 	/// <ul>
 	/// <li>Splits words at punctuation characters, removing punctuation. However, a 
@@ -42,12 +42,12 @@
 	/// <li>Recognizes email addresses and internet hostnames as one token.
 	/// </ul>
 	/// 
-	/// <p>Many applications have specific tokenizer needs.  If this tokenizer does
+	/// <p/>Many applications have specific tokenizer needs.  If this tokenizer does
 	/// not suit your application, please consider copying this source code
 	/// directory to your project and maintaining your own grammar-based tokenizer.
 	/// 
 	/// <a name="version"/>
-	/// <p>
+	/// <p/>
 	/// You must specify the required {@link Version} compatibility when creating
 	/// StandardAnalyzer:
 	/// <ul>

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/StopAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/StopAnalyzer.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/StopAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/StopAnalyzer.cs Mon Dec 14 14:13:03 2009
@@ -26,7 +26,7 @@
 	/// {@link StopFilter}.
 	/// 
 	/// <a name="version"/>
-	/// <p>
+	/// <p/>
 	/// You must specify the required {@link Version} compatibility when creating
 	/// StopAnalyzer:
 	/// <ul>

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/StopFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/StopFilter.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/StopFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/StopFilter.cs Mon Dec 14 14:13:03 2009
@@ -291,13 +291,13 @@
 		
 		/// <summary> Set the default position increments behavior of every StopFilter created
 		/// from now on.
-		/// <p>
+		/// <p/>
 		/// Note: behavior of a single StopFilter instance can be modified with
 		/// {@link #SetEnablePositionIncrements(boolean)}. This static method allows
 		/// control over behavior of classes using StopFilters internally, for
 		/// example {@link Lucene.Net.Analysis.Standard.StandardAnalyzer
 		/// StandardAnalyzer} if used with the no-arg ctor.
-		/// <p>
+		/// <p/>
 		/// Default : false.
 		/// 
 		/// </summary>
@@ -325,11 +325,11 @@
 		/// lose information (positions of the original tokens)
 		/// during indexing.
 		/// 
-		/// <p> When set, when a token is stopped
+		/// <p/> When set, when a token is stopped
 		/// (omitted), the position increment of the following
 		/// token is incremented.
 		/// 
-		/// <p> <b>NOTE</b>: be sure to also
+		/// <p/> <b>NOTE</b>: be sure to also
 		/// set {@link QueryParser#setEnablePositionIncrements} if
 		/// you use QueryParser to create queries.
 		/// </summary>

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TeeSinkTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/TeeSinkTokenFilter.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TeeSinkTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TeeSinkTokenFilter.cs Mon Dec 14 14:13:03 2009
@@ -63,7 +63,7 @@
 	/// ...
 	/// </pre>
 	/// In this case, the fields can be added in any order, because the sources are not used anymore and all sinks are ready.
-	/// <p>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
+	/// <p/>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
 	/// </summary>
 	public sealed class TeeSinkTokenFilter:TokenFilter
 	{

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Token.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs Mon Dec 14 14:13:03 2009
@@ -35,31 +35,31 @@
 	/// <summary>A Token is an occurrence of a term from the text of a field.  It consists of
 	/// a term's text, the start and end offset of the term in the text of the field,
 	/// and a type string.
-	/// <p>
+	/// <p/>
 	/// The start and end offsets permit applications to re-associate a token with
 	/// its source text, e.g., to display highlighted query terms in a document
 	/// browser, or to show matching text fragments in a <abbr
 	/// title="KeyWord In Context">KWIC</abbr> display, etc.
-	/// <p>
+	/// <p/>
 	/// The type is a string, assigned by a lexical analyzer
 	/// (a.k.a. tokenizer), naming the lexical or syntactic class that the token
 	/// belongs to.  For example an end of sentence marker token might be implemented
 	/// with type "eos".  The default token type is "word".  
-	/// <p>
+	/// <p/>
 	/// A Token can optionally have metadata (a.k.a. Payload) in the form of a variable
 	/// length byte array. Use {@link TermPositions#GetPayloadLength()} and 
 	/// {@link TermPositions#GetPayload(byte[], int)} to retrieve the payloads from the index.
 	/// </summary>
 	/// <summary><br><br>
 	/// </summary>
-	/// <summary><p><b>NOTE:</b> As of 2.9, Token implements all {@link Attribute} interfaces
+	/// <summary><p/><b>NOTE:</b> As of 2.9, Token implements all {@link Attribute} interfaces
 	/// that are part of core Lucene and can be found in the {@code tokenattributes} subpackage.
 	/// Even though it is not necessary to use Token anymore, with the new TokenStream API it can
 	/// be used as convenience class that implements all {@link Attribute}s, which is especially useful
 	/// to easily switch from the old to the new TokenStream API.
 	/// </summary>
 	/// <summary><br><br>
-	/// <p><b>NOTE:</b> As of 2.3, Token stores the term text
+	/// <p/><b>NOTE:</b> As of 2.3, Token stores the term text
 	/// internally as a malleable char[] termBuffer instead of
 	/// String termText.  The indexing code and core tokenizers
 	/// have been changed to re-use a single Token instance, changing
@@ -71,7 +71,7 @@
 	/// associated performance cost has been added (below).  The
 	/// {@link #TermText()} method has been deprecated.</p>
 	/// </summary>
-	/// <summary><p>Tokenizers and TokenFilters should try to re-use a Token instance when
+	/// <summary><p/>Tokenizers and TokenFilters should try to re-use a Token instance when
 	/// possible for best performance, by implementing the
 	/// {@link TokenStream#IncrementToken()} API.
 	/// Failing that, to create a new Token you should first use
@@ -87,7 +87,7 @@
 	/// set the length of the term text.  See <a target="_top"
 	/// href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
 	/// for details.</p>
-	/// <p>Typical Token reuse patterns:
+	/// <p/>Typical Token reuse patterns:
 	/// <ul>
 	/// <li> Copying text from a string (type is reset to {@link #DEFAULT_TYPE} if not
 	/// specified):<br/>
@@ -335,9 +335,9 @@
 		/// relative to the previous Token in a {@link TokenStream}, used in phrase
 		/// searching.
 		/// 
-		/// <p>The default value is one.
+		/// <p/>The default value is one.
 		/// 
-		/// <p>Some common uses for this are:<ul>
+		/// <p/>Some common uses for this are:<ul>
 		/// 
 		/// <li>Set it to zero to put multiple terms in the same position.  This is
 		/// useful if, e.g., a word has multiple stems.  Searches for phrases

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/TokenFilter.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenFilter.cs Mon Dec 14 14:13:03 2009
@@ -21,7 +21,7 @@
 {
 	
 	/// <summary> A TokenFilter is a TokenStream whose input is another TokenStream.
-	/// <p>
+	/// <p/>
 	/// This is an abstract class; subclasses must override {@link #IncrementToken()}.
 	/// 
 	/// </summary>

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/TokenStream.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs Mon Dec 14 14:13:03 2009
@@ -35,7 +35,7 @@
 	
 	/// <summary> A <code>TokenStream</code> enumerates the sequence of tokens, either from
 	/// {@link Field}s of a {@link Document} or from query text.
-	/// <p>
+	/// <p/>
 	/// This is an abstract class. Concrete subclasses are:
 	/// <ul>
 	/// <li>{@link Tokenizer}, a <code>TokenStream</code> whose input is a Reader; and
@@ -46,14 +46,14 @@
 	/// has moved from being {@link Token} based to {@link Attribute} based. While
 	/// {@link Token} still exists in 2.9 as a convenience class, the preferred way
 	/// to store the information of a {@link Token} is to use {@link AttributeImpl}s.
-	/// <p>
+	/// <p/>
 	/// <code>TokenStream</code> now extends {@link AttributeSource}, which provides
 	/// access to all of the token {@link Attribute}s for the <code>TokenStream</code>.
 	/// Note that only one instance per {@link AttributeImpl} is created and reused
 	/// for every token. This approach reduces object creation and allows local
 	/// caching of references to the {@link AttributeImpl}s. See
 	/// {@link #IncrementToken()} for further details.
-	/// <p>
+	/// <p/>
 	/// <b>The workflow of the new <code>TokenStream</code> API is as follows:</b>
 	/// <ol>
 	/// <li>Instantiation of <code>TokenStream</code>/{@link TokenFilter}s which add/get
@@ -72,10 +72,10 @@
 	/// the attributes must be added during instantiation. Filters and consumers are
 	/// not required to check for availability of attributes in
 	/// {@link #IncrementToken()}.
-	/// <p>
+	/// <p/>
 	/// You can find some example code for the new API in the analysis package level
 	/// Javadoc.
-	/// <p>
+	/// <p/>
 	/// Sometimes it is desirable to capture a current state of a <code>TokenStream</code>
 	/// , e. g. for buffering purposes (see {@link CachingTokenFilter},
 	/// {@link TeeSinkTokenFilter}). For this usecase
@@ -290,18 +290,18 @@
 		/// but it only works if <b>all</b> <code>TokenStream</code>s use the new API and
 		/// implement {@link #IncrementToken}. This setting can only be enabled
 		/// globally.
-		/// <P>
+		/// <p/>
 		/// This setting only affects <code>TokenStream</code>s instantiated after this
 		/// call. All <code>TokenStream</code>s already created use the other setting.
-		/// <P>
+		/// <p/>
 		/// All core {@link Analyzer}s are compatible with this setting, if you have
 		/// your own <code>TokenStream</code>s that are also compatible, you should enable
 		/// this.
-		/// <P>
+		/// <p/>
 		/// When enabled, tokenization may throw {@link UnsupportedOperationException}
 		/// s, if the whole tokenizer chain is not compatible eg one of the
 		/// <code>TokenStream</code>s does not implement the new <code>TokenStream</code> API.
-		/// <P>
+		/// <p/>
 		/// The default is <code>false</code>, so there is the fallback to the old API
 		/// available.
 		/// 

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs Mon Dec 14 14:13:03 2009
@@ -26,9 +26,9 @@
 	/// relative to the previous Token in a TokenStream, used in phrase
 	/// searching.
 	/// 
-	/// <p>The default value is one.
+	/// <p/>The default value is one.
 	/// 
-	/// <p>Some common uses for this are:<ul>
+	/// <p/>Some common uses for this are:<ul>
 	/// 
 	/// <li>Set it to zero to put multiple terms in the same position.  This is
 	/// useful if, e.g., a word has multiple stems.  Searches for phrases

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs Mon Dec 14 14:13:03 2009
@@ -27,9 +27,9 @@
 	/// relative to the previous Token in a {@link TokenStream}, used in phrase
 	/// searching.
 	/// 
-	/// <p>The default value is one.
+	/// <p/>The default value is one.
 	/// 
-	/// <p>Some common uses for this are:<ul>
+	/// <p/>Some common uses for this are:<ul>
 	/// 
 	/// <li>Set it to zero to put multiple terms in the same position.  This is
 	/// useful if, e.g., a word has multiple stems.  Searches for phrases

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Tokenizer.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenizer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenizer.cs Mon Dec 14 14:13:03 2009
@@ -23,9 +23,9 @@
 {
 	
 	/// <summary> A Tokenizer is a TokenStream whose input is a Reader.
-	/// <p>
+	/// <p/>
 	/// This is an abstract class; subclasses must override {@link #IncrementToken()}
-	/// <p>
+	/// <p/>
 	/// NOTE: Subclasses overriding {@link #IncrementToken()} must call
 	/// {@link AttributeSource#ClearAttributes()} before setting attributes.
 	/// Subclasses overriding {@link #IncrementToken()} must call

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/AbstractField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/AbstractField.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/AbstractField.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/AbstractField.cs Mon Dec 14 14:13:03 2009
@@ -126,7 +126,7 @@
 		/// multiplied into the score of all hits on this this field of this
 		/// document.
 		/// 
-		/// <p>The boost is multiplied by {@link Lucene.Net.Documents.Document#GetBoost()} of the document
+		/// <p/>The boost is multiplied by {@link Lucene.Net.Documents.Document#GetBoost()} of the document
 		/// containing this field.  If a document has multiple fields with the same
 		/// name, all such values are multiplied together.  This product is then
 		/// used to compute the norm factor for the field.  By
@@ -154,9 +154,9 @@
 		
 		/// <summary>Returns the boost factor for hits for this field.
 		/// 
-		/// <p>The default value is 1.0.
+		/// <p/>The default value is 1.0.
 		/// 
-		/// <p>Note: this value is not stored directly with the document in the index.
+		/// <p/>Note: this value is not stored directly with the document in the index.
 		/// Documents returned from {@link Lucene.Net.Index.IndexReader#Document(int)} and
 		/// {@link Lucene.Net.Search.Hits#Doc(int)} may thus not have the same value present as when
 		/// this field was indexed.
@@ -376,7 +376,7 @@
 		/// If set, omit term freq, positions and payloads from
 		/// postings for this field.
 		/// 
-		/// <p><b>NOTE</b>: While this option reduces storage space
+		/// <p/><b>NOTE</b>: While this option reduces storage space
 		/// required in the index, it also means any query
 		/// requiring positional information, such as {@link
 		/// PhraseQuery} or {@link SpanQuery} subclasses will

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/DateField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/DateField.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/DateField.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/DateField.cs Mon Dec 14 14:13:03 2009
@@ -33,18 +33,18 @@
 	/// The strings are structured so that lexicographic sorting orders by date,
 	/// which makes them suitable for use as field values and search terms.
 	/// 
-	/// <P>Note that this class saves dates with millisecond granularity,
+	/// <p/>Note that this class saves dates with millisecond granularity,
 	/// which is bad for {@link TermRangeQuery} and {@link PrefixQuery}, as those
 	/// queries are expanded to a BooleanQuery with a potentially large number
 	/// of terms when searching. Thus you might want to use
 	/// {@link DateTools} instead.
 	/// 
-	/// <P>
+	/// <p/>
 	/// Note: dates before 1970 cannot be used, and therefore cannot be
 	/// indexed when using this class. See {@link DateTools} for an
 	/// alternative without such a limitation.
 	/// 
-	/// <P>
+	/// <p/>
 	/// Another approach is {@link NumericUtils}, which provides
 	/// a sortable binary representation (prefix encoded) of numeric values, which
 	/// date/time are.

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/DateTools.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/DateTools.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/DateTools.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/DateTools.cs Mon Dec 14 14:13:03 2009
@@ -28,15 +28,15 @@
 	/// them by date, which makes them suitable for use as field values 
 	/// and search terms.
 	/// 
-	/// <P>This class also helps you to limit the resolution of your dates. Do not
+	/// <p/>This class also helps you to limit the resolution of your dates. Do not
 	/// save dates with a finer resolution than you really need, as then
 	/// RangeQuery and PrefixQuery will require more memory and become slower.
 	/// 
-	/// <P>Compared to {@link DateField} the strings generated by the methods
+	/// <p/>Compared to {@link DateField} the strings generated by the methods
 	/// in this class take slightly more space, unless your selected resolution
 	/// is set to <code>Resolution.DAY</code> or lower.
 	/// 
-	/// <P>
+	/// <p/>
 	/// Another approach is {@link NumericUtils}, which provides
 	/// a sortable binary representation (prefix encoded) of numeric values, which
 	/// date/time are.

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Document.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs Mon Dec 14 14:13:03 2009
@@ -33,7 +33,7 @@
 	/// should typically contain one or more stored fields which uniquely identify
 	/// it.
 	/// 
-	/// <p>Note that fields which are <i>not</i> {@link Fieldable#IsStored() stored} are
+	/// <p/>Note that fields which are <i>not</i> {@link Fieldable#IsStored() stored} are
 	/// <i>not</i> available in documents retrieved from the index, e.g. with {@link
 	/// ScoreDoc#doc}, {@link Searcher#Doc(int)} or {@link
 	/// IndexReader#Document(int)}.
@@ -106,9 +106,9 @@
 		/// <summary>Sets a boost factor for hits on any field of this document.  This value
 		/// will be multiplied into the score of all hits on this document.
 		/// 
-		/// <p>The default value is 1.0.
+		/// <p/>The default value is 1.0.
 		/// 
-		/// <p>Values are multiplied into the value of {@link Fieldable#GetBoost()} of
+		/// <p/>Values are multiplied into the value of {@link Fieldable#GetBoost()} of
 		/// each field in this document.  Thus, this method in effect sets a default
 		/// boost for the fields of this document.
 		/// 
@@ -122,7 +122,7 @@
 		
 		/// <summary>Returns, at indexing time, the boost factor as set by {@link #SetBoost(float)}. 
 		/// 
-		/// <p>Note that once a document is indexed this value is no longer available
+		/// <p/>Note that once a document is indexed this value is no longer available
 		/// from the index.  At search time, for retrieved documents, this method always 
 		/// returns 1. This however does not mean that the boost value set at  indexing 
 		/// time was ignored - it was just combined with other indexing time factors and 
@@ -138,10 +138,10 @@
 			return boost;
 		}
 		
-		/// <summary> <p>Adds a field to a document.  Several fields may be added with
+		/// <summary> <p/>Adds a field to a document.  Several fields may be added with
 		/// the same name.  In this case, if the fields are indexed, their text is
 		/// treated as though appended for the purposes of search.</p>
-		/// <p> Note that add like the removeField(s) methods only makes sense 
+		/// <p/> Note that add like the removeField(s) methods only makes sense 
 		/// prior to adding a document to an index. These methods cannot
 		/// be used to change the content of an existing index! In order to achieve this,
 		/// a document has to be deleted from an index and a new changed version of that
@@ -152,10 +152,10 @@
 			fields.Add(field);
 		}
 		
-		/// <summary> <p>Removes field with the specified name from the document.
+		/// <summary> <p/>Removes field with the specified name from the document.
 		/// If multiple fields exist with this name, this method removes the first field that has been added.
 		/// If there is no field with the specified name, the document remains unchanged.</p>
-		/// <p> Note that the removeField(s) methods like the add method only make sense 
+		/// <p/> Note that the removeField(s) methods like the add method only make sense 
 		/// prior to adding a document to an index. These methods cannot
 		/// be used to change the content of an existing index! In order to achieve this,
 		/// a document has to be deleted from an index and a new changed version of that
@@ -175,9 +175,9 @@
 			}
 		}
 		
-		/// <summary> <p>Removes all fields with the given name from the document.
+		/// <summary> <p/>Removes all fields with the given name from the document.
 		/// If there is no field with the specified name, the document remains unchanged.</p>
-		/// <p> Note that the removeField(s) methods like the add method only make sense 
+		/// <p/> Note that the removeField(s) methods like the add method only make sense 
 		/// prior to adding a document to an index. These methods cannot
 		/// be used to change the content of an existing index! In order to achieve this,
 		/// a document has to be deleted from an index and a new changed version of that
@@ -253,7 +253,7 @@
 		}
 		
 		/// <summary>Returns a List of all the fields in a document.
-		/// <p>Note that fields which are <i>not</i> {@link Fieldable#IsStored() stored} are
+		/// <p/>Note that fields which are <i>not</i> {@link Fieldable#IsStored() stored} are
 		/// <i>not</i> available in documents retrieved from the
 		/// index, e.g. {@link Searcher#Doc(int)} or {@link
 		/// IndexReader#Document(int)}.

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Field.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs Mon Dec 14 14:13:03 2009
@@ -238,14 +238,14 @@
 		}
 		
 		
-		/// <summary><p>Expert: change the value of this field.  This can
+		/// <summary><p/>Expert: change the value of this field.  This can
 		/// be used during indexing to re-use a single Field
 		/// instance to improve indexing speed by avoiding GC cost
 		/// of new'ing and reclaiming Field instances.  Typically
 		/// a single {@link Document} instance is re-used as
 		/// well.  This helps most on small documents.</p>
 		/// 
-		/// <p>Each Field instance should only be used once
+		/// <p/>Each Field instance should only be used once
 		/// within a single {@link Document} instance.  See <a
 		/// href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed">ImproveIndexingSpeed</a>
 		/// for details.</p> 

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Fieldable.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs Mon Dec 14 14:13:03 2009
@@ -25,7 +25,7 @@
 	
 	/// <summary> Synonymous with {@link Field}.
 	/// 
-	/// <p><bold>WARNING</bold>: This interface may change within minor versions, despite Lucene's backward compatibility requirements.
+	/// <p/><bold>WARNING</bold>: This interface may change within minor versions, despite Lucene's backward compatibility requirements.
 	/// This means new methods may be added from version to version.  This change only affects the Fieldable API; other backwards
 	/// compatibility promises remain intact. For example, Lucene can still
 	/// read and write indices created within the same major version.
@@ -39,7 +39,7 @@
 		/// multiplied into the score of all hits on this this field of this
 		/// document.
 		/// 
-		/// <p>The boost is multiplied by {@link Lucene.Net.Documents.Document#GetBoost()} of the document
+		/// <p/>The boost is multiplied by {@link Lucene.Net.Documents.Document#GetBoost()} of the document
 		/// containing this field.  If a document has multiple fields with the same
 		/// name, all such values are multiplied together.  This product is then
 		/// used to compute the norm factor for the field.  By
@@ -63,9 +63,9 @@
 		
 		/// <summary>Returns the boost factor for hits for this field.
 		/// 
-		/// <p>The default value is 1.0.
+		/// <p/>The default value is 1.0.
 		/// 
-		/// <p>Note: this value is not stored directly with the document in the index.
+		/// <p/>Note: this value is not stored directly with the document in the index.
 		/// Documents returned from {@link Lucene.Net.Index.IndexReader#Document(int)} and
 		/// {@link Lucene.Net.Search.Hits#Doc(int)} may thus not have the same value present as when
 		/// this field was indexed.
@@ -81,7 +81,7 @@
 		System.String Name();
 		
 		/// <summary>The value of the field as a String, or null.
-		/// <p>
+		/// <p/>
 		/// For indexing, if isStored()==true, the stringValue() will be used as the stored field value
 		/// unless isBinary()==true, in which case binaryValue() will be used.
 		/// 
@@ -202,7 +202,7 @@
 		/// <summary> Return the raw byte[] for the binary field.  Note that
 		/// you must also call {@link #getBinaryLength} and {@link
 		/// #getBinaryOffset} to know which range of bytes in this
-		/// returned array belong to the field.<p>
+		/// returned array belong to the field.<p/>
 		/// About reuse: if you pass in the result byte[] and it is
 		/// used, likely the underlying implementation will hold
 		/// onto this byte[] and return it in future calls to

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumberTools.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/NumberTools.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumberTools.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumberTools.cs Mon Dec 14 14:13:03 2009
@@ -28,12 +28,12 @@
 	/// <summary> Provides support for converting longs to Strings, and back again. The strings
 	/// are structured so that lexicographic sorting order is preserved.
 	/// 
-	/// <p>
+	/// <p/>
 	/// That is, if l1 is less than l2 for any two longs l1 and l2, then
 	/// NumberTools.longToString(l1) is lexicographically less than
 	/// NumberTools.longToString(l2). (Similarly for "greater than" and "equals".)
 	/// 
-	/// <p>
+	/// <p/>
 	/// This class handles <b>all</b> long values (unlike
 	/// {@link Lucene.Net.Documents.DateField}).
 	/// 

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/NumericField.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs Mon Dec 14 14:13:03 2009
@@ -29,7 +29,7 @@
 {
 	// javadocs
 	
-	/// <summary> <p>This class provides a {@link Field} that enables indexing
+	/// <summary> <p/>This class provides a {@link Field} that enables indexing
 	/// of numeric values for efficient range filtering and
 	/// sorting.  Here's an example usage, adding an int value:
 	/// <pre>
@@ -53,7 +53,7 @@
 	/// }
 	/// </pre>
 	/// 
-	/// <p>The java native types <code>int</code>, <code>long</code>,
+	/// <p/>The java native types <code>int</code>, <code>long</code>,
 	/// <code>float</code> and <code>double</code> are
 	/// directly supported.  However, any value that can be
 	/// converted into these native types can also be indexed.
@@ -66,7 +66,7 @@
 	/// (for year, month, etc.) to construct an <code>int</code> or
 	/// <code>long</code> value.</p>
 	/// 
-	/// <p>To perform range querying or filtering against a
+	/// <p/>To perform range querying or filtering against a
 	/// <code>NumericField</code>, use {@link NumericRangeQuery} or {@link
 	/// NumericRangeFilter}.  To sort according to a
 	/// <code>NumericField</code>, use the normal numeric sort types, eg
@@ -74,25 +74,25 @@
 	/// will not work with these fields).  <code>NumericField</code> values
 	/// can also be loaded directly from {@link FieldCache}.</p>
 	/// 
-	/// <p>By default, a <code>NumericField</code>'s value is not stored but
+	/// <p/>By default, a <code>NumericField</code>'s value is not stored but
 	/// is indexed for range filtering and sorting.  You can use
 	/// the {@link #NumericField(String,Field.Store,boolean)}
 	/// constructor if you need to change these defaults.</p>
 	/// 
-	/// <p>You may add the same field name as a <code>NumericField</code> to
+	/// <p/>You may add the same field name as a <code>NumericField</code> to
 	/// the same document more than once.  Range querying and
 	/// filtering will be the logical OR of all values; so a range query
 	/// will hit all documents that have at least one value in
 	/// the range. However sort behavior is not defined.  If you need to sort,
 	/// you should separately index a single-valued <code>NumericField</code>.</p>
 	/// 
-	/// <p>A <code>NumericField</code> will consume somewhat more disk space
+	/// <p/>A <code>NumericField</code> will consume somewhat more disk space
 	/// in the index than an ordinary single-valued field.
 	/// However, for a typical index that includes substantial
 	/// textual content per document, this increase will likely
 	/// be in the noise. </p>
 	/// 
-	/// <p>Within Lucene, each numeric value is indexed as a
+	/// <p/>Within Lucene, each numeric value is indexed as a
 	/// <em>trie</em> structure, where each term is logically
 	/// assigned to larger and larger pre-defined brackets (which
 	/// are simply lower-precision representations of the value).
@@ -113,30 +113,30 @@
 	/// to use {@link Integer#MAX_VALUE}, which produces one
 	/// term per value.
 	/// 
-	/// <p>For more information on the internals of numeric trie
+	/// <p/>For more information on the internals of numeric trie
 	/// indexing, including the <a
 	/// href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
 	/// configuration, see {@link NumericRangeQuery}. The format of
 	/// indexed values is described in {@link NumericUtils}.
 	/// 
-	/// <p>If you only need to sort by numeric value, and never
+	/// <p/>If you only need to sort by numeric value, and never
 	/// run range querying/filtering, you can index using a
 	/// <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
 	/// This will minimize disk space consumed. </p>
 	/// 
-	/// <p>More advanced users can instead use {@link
+	/// <p/>More advanced users can instead use {@link
 	/// NumericTokenStream} directly, when indexing numbers. This
 	/// class is a wrapper around this token stream type for
 	/// easier, more intuitive usage.</p>
 	/// 
-	/// <p><b>NOTE:</b> This class is only used during
+	/// <p/><b>NOTE:</b> This class is only used during
 	/// indexing. When retrieving the stored field value from a
 	/// {@link Document} instance after search, you will get a
 	/// conventional {@link Fieldable} instance where the numeric
 	/// values are returned as {@link String}s (according to
 	/// <code>toString(value)</code> of the used data type).
 	/// 
-	/// <p><font color="red"><b>NOTE:</b> This API is
+	/// <p/><font color="red"><b>NOTE:</b> This API is
 	/// experimental and might change in incompatible ways in the
 	/// next release.</font>
 	/// 

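The usage example embedded in the summary above is Java. A hedged C# equivalent (illustration only, not part of this patch; it assumes the port mirrors the Java API with a fluent NumericField and the (name, Field.Store, index) constructor mentioned in the summary):

    using Lucene.Net.Documents;

    public class NumericFieldDemo
    {
        public static Document MakeDoc(int weight, long timestamp)
        {
            Document doc = new Document();

            // Indexed for range filtering/sorting, not stored (the defaults described above).
            doc.Add(new NumericField("weight").SetIntValue(weight));

            // Stored as well, via the constructor the summary points to for changing the defaults.
            doc.Add(new NumericField("timestamp", Field.Store.YES, true).SetLongValue(timestamp));

            return doc;
        }
    }
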
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CheckIndex.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/CheckIndex.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CheckIndex.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CheckIndex.cs Mon Dec 14 14:13:03 2009
@@ -30,10 +30,10 @@
 	/// write a new segments file that removes reference to
 	/// problematic segments.
 	/// 
-	/// <p>As this tool checks every byte in the index, on a large
+	/// <p/>As this tool checks every byte in the index, on a large
 	/// index it can take quite a long time to run.
 	/// 
-	/// <p><b>WARNING</b>: this tool and API is new and
+	/// <p/><b>WARNING</b>: this tool and API is new and
 	/// experimental and is subject to suddenly change in the
 	/// next release.  Please make a complete backup of your
 	/// index before using this to fix your index!
@@ -53,7 +53,7 @@
 		
 		/// <summary> Returned from {@link #CheckIndex()} detailing the health and status of the index.
 		/// 
-		/// <p><b>WARNING</b>: this API is new and experimental and is
+		/// <p/><b>WARNING</b>: this API is new and experimental and is
 		/// subject to suddenly change in the next release.
 		/// 
 		/// </summary>
@@ -120,7 +120,7 @@
 			/// <summary>Holds the status of each segment in the index.
 			/// See {@link #segmentInfos}.
 			/// 
-			/// <p><b>WARNING</b>: this API is new and experimental and is
+			/// <p/><b>WARNING</b>: this API is new and experimental and is
 			/// subject to suddenly change in the next release.
 			/// </summary>
 			public class SegmentInfoStatus
@@ -324,10 +324,10 @@
 		/// <summary>Returns a {@link Status} instance detailing
 		/// the state of the index.
 		/// 
-		/// <p>As this method checks every byte in the index, on a large
+		/// <p/>As this method checks every byte in the index, on a large
 		/// index it can take quite a long time to run.
 		/// 
-		/// <p><b>WARNING</b>: make sure
+		/// <p/><b>WARNING</b>: make sure
 		/// you only call this when the index is not opened by any
 		/// writer. 
 		/// </summary>
@@ -342,11 +342,11 @@
 		/// </summary>
 		/// <param name="onlySegments">list of specific segment names to check
 		/// 
-		/// <p>As this method checks every byte in the specified
+		/// <p/>As this method checks every byte in the specified
 		/// segments, on a large index it can take quite a long
 		/// time to run.
 		/// 
-		/// <p><b>WARNING</b>: make sure
+		/// <p/><b>WARNING</b>: make sure
 		/// you only call this when the index is not opened by any
 		/// writer. 
 		/// </param>
@@ -879,12 +879,12 @@
 		/// you must separately open an {@link IndexWriter}, which
 		/// deletes unreferenced files when it's created.
 		/// 
-		/// <p><b>WARNING</b>: this writes a
+		/// <p/><b>WARNING</b>: this writes a
 		/// new segments file into the index, effectively removing
 		/// all documents in broken segments from the index.
 		/// BE CAREFUL.
 		/// 
-		/// <p><b>WARNING</b>: Make sure you only call this when the
+		/// <p/><b>WARNING</b>: Make sure you only call this when the
 		/// index is not opened  by any writer. 
 		/// </summary>
 		public virtual void  FixIndex(Status result)
@@ -909,7 +909,7 @@
 		}
 		
 		/// <summary>Command-line interface to check and fix an index.
-		/// <p>
+		/// <p/>
 		/// Run it like this:
 		/// <pre>
 		/// java -ea:Lucene.Net... Lucene.Net.Index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]
@@ -922,16 +922,16 @@
 		/// -segment _a</code>.  You can't use this with the -fix
 		/// option.
 		/// </ul>
-		/// <p><b>WARNING</b>: <code>-fix</code> should only be used on an emergency basis as it will cause
+		/// <p/><b>WARNING</b>: <code>-fix</code> should only be used on an emergency basis as it will cause
 		/// documents (perhaps many) to be permanently removed from the index.  Always make
 		/// a backup copy of your index before running this!  Do not run this tool on an index
 		/// that is actively being written to.  You have been warned!
-		/// <p>                Run without -fix, this tool will open the index, report version information
+		/// <p/>                Run without -fix, this tool will open the index, report version information
 		/// and report any exceptions it hits and what action it would take if -fix were
 		/// specified.  With -fix, this tool will remove any segments that have issues and
 		/// write a new segments_N file.  This means all documents contained in the affected
 		/// segments will be removed.
-		/// <p>
+		/// <p/>
 		/// This tool exits with exit code 1 if the index cannot be opened or has any
 		/// corruption, else 0.
 		/// </summary>

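Besides the command line described above, CheckIndex can be driven from code. A hedged sketch (illustration only, not part of this patch; the checking method's name and the Status.clean field follow the Javadoc/Java API and may be exposed differently in the port):

    using Lucene.Net.Index;
    using Lucene.Net.Store;

    public class CheckIndexDemo
    {
        public static void Check(Directory dir, bool doFix)
        {
            CheckIndex checker = new CheckIndex(dir);

            // The Javadoc above calls this method CheckIndex(); C# cannot give a method the
            // same name as its class, so the name below is only a placeholder for whatever
            // the port actually exposes.
            CheckIndex.Status status = checker.CheckIndex_Renamed_Method();

            if (!status.clean && doFix)
            {
                // FixIndex(Status) is shown later in this diff: it writes a new segments file
                // and permanently drops documents in broken segments -- back up first.
                checker.FixIndex(status);
            }
        }
    }
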
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/DirectoryReader.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs Mon Dec 14 14:13:03 2009
@@ -1086,7 +1086,7 @@
 		
 		/// <summary> Expert: return the IndexCommit that this reader has opened.
 		/// <p/>
-		/// <p><b>WARNING</b>: this API is new and experimental and may suddenly change.</p>
+		/// <p/><b>WARNING</b>: this API is new and experimental and may suddenly change.</p>
 		/// </summary>
 		public override IndexCommit GetIndexCommit()
 		{

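A hedged sketch of the expert API touched in this hunk (illustration only, not part of this patch; IndexReader.Open(Directory, bool) and the IndexCommit accessors follow the 2.9 Java API and are assumed to exist in the port with the same casing):

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    public class CommitInfoDemo
    {
        public static void Show(Directory dir)
        {
            IndexReader reader = IndexReader.Open(dir, true); // read-only
            try
            {
                // The commit this reader has opened, as documented above.
                IndexCommit commit = reader.GetIndexCommit();
                Console.WriteLine("segments file: " + commit.GetSegmentsFileName());
                Console.WriteLine("generation:    " + commit.GetGeneration());
            }
            finally
            {
                reader.Close();
            }
        }
    }
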
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/FieldInvertState.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs Mon Dec 14 14:13:03 2009
@@ -26,7 +26,7 @@
 	/// being added to the index. The information collected in this class is
 	/// also used to calculate the normalization factor for a field.
 	/// 
-	/// <p><b>WARNING</b>: This API is new and experimental, and may suddenly
+	/// <p/><b>WARNING</b>: This API is new and experimental, and may suddenly
 	/// change.</p>
 	/// </summary>
 	public sealed class FieldInvertState

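FieldInvertState is what 2.9 hands to Similarity when the normalization factor mentioned above is computed. A hedged sketch (illustration only, not part of this patch; it assumes the port exposes an overridable Similarity.ComputeNorm(string, FieldInvertState) and a GetBoost() accessor, as the Java 2.9 API does):

    using Lucene.Net.Index;
    using Lucene.Net.Search;

    public class LengthNeutralSimilarity : DefaultSimilarity
    {
        // Keep only the boost collected while the field was inverted; ignore its length.
        public override float ComputeNorm(string field, FieldInvertState state)
        {
            return state.GetBoost();
        }
    }
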
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldReaderException.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/FieldReaderException.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldReaderException.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldReaderException.cs Mon Dec 14 14:13:03 2009
@@ -39,7 +39,7 @@
 		/// detail message of <tt>(cause==null &#63; null : cause.toString())</tt>
 		/// (which typically contains the class and detail message of
 		/// <tt>cause</tt>).  
-		/// <p>
+		/// <p/>
 		/// This constructor is useful for runtime exceptions
 		/// that are little more than wrappers for other throwables.
 		/// 
@@ -68,7 +68,7 @@
 		}
 		
 		/// <summary> Constructs a new runtime exception with the specified detail message and
-		/// cause.  <p>Note that the detail message associated with
+		/// cause.  <p/>Note that the detail message associated with
 		/// <code>cause</code> is <i>not</i> automatically incorporated in
 		/// this runtime exception's detail message.
 		/// 

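A tiny hedged sketch of the cause-wrapping constructor described in this hunk (illustration only, not part of this patch; it assumes the port exposes FieldReaderException(System.Exception) for the Java Throwable-cause constructor):

    using Lucene.Net.Index;

    public class FieldReaderExceptionDemo
    {
        public static void LoadField()
        {
            try
            {
                throw new System.IO.IOException("simulated low-level read failure");
            }
            catch (System.IO.IOException e)
            {
                // Cause-only constructor from the summary: the detail message becomes
                // cause.ToString(), and the original exception stays attached as the cause.
                throw new FieldReaderException(e);
            }
        }
    }
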
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/FilterIndexReader.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs Mon Dec 14 14:13:03 2009
@@ -141,10 +141,10 @@
 		
 		protected internal IndexReader in_Renamed;
 		
-		/// <summary> <p>Construct a FilterIndexReader based on the specified base reader.
+		/// <summary> <p/>Construct a FilterIndexReader based on the specified base reader.
 		/// Directory locking for delete, undeleteAll, and setNorm operations is
 		/// left to the base reader.</p>
-		/// <p>Note that base reader is closed if this FilterIndexReader is closed.</p>
+		/// <p/>Note that base reader is closed if this FilterIndexReader is closed.</p>
 		/// </summary>
 		/// <param name="in">specified base reader.
 		/// </param>

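A minimal hedged sketch of the pattern the constructor note above describes: a FilterIndexReader subclass that wraps a base reader and overrides one method (illustration only, not part of this patch; it assumes NumDocs() is an overridable method in the port, as in the 2.9 API):

    using System;
    using Lucene.Net.Index;

    public class LoggingIndexReader : FilterIndexReader
    {
        // Directory locking and closing of the wrapped reader are left to the base
        // reader, as the note above says; closing this reader closes the base reader.
        public LoggingIndexReader(IndexReader baseReader) : base(baseReader)
        {
        }

        public override int NumDocs()
        {
            Console.WriteLine("NumDocs() called");
            return base.NumDocs();
        }
    }
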
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexCommit.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs Mon Dec 14 14:13:03 2009
@@ -22,21 +22,21 @@
 namespace Lucene.Net.Index
 {
 	
-	/// <summary> <p>Expert: represents a single commit into an index as seen by the
+	/// <summary> <p/>Expert: represents a single commit into an index as seen by the
 	/// {@link IndexDeletionPolicy} or {@link IndexReader}.</p>
 	/// 
-	/// <p> Changes to the content of an index are made visible
+	/// <p/> Changes to the content of an index are made visible
 	/// only after the writer who made that change commits by
 	/// writing a new segments file
 	/// (<code>segments_N</code>). This point in time, when the
 	/// action of writing of a new segments file to the directory
 	/// is completed, is an index commit.</p>
 	/// 
-	/// <p>Each index commit point has a unique segments file
+	/// <p/>Each index commit point has a unique segments file
 	/// associated with it. The segments file associated with a
 	/// later index commit point would have a larger N.</p>
 	/// 
-	/// <p><b>WARNING</b>: This API is a new and experimental and
+	/// <p/><b>WARNING</b>: This API is a new and experimental and
 	/// may suddenly change. </p>
 	/// </summary>
 	
@@ -57,10 +57,10 @@
 		/// <summary> Delete this commit point.  This only applies when using
 		/// the commit point in the context of IndexWriter's
 		/// IndexDeletionPolicy.
-		/// <p>
+		/// <p/>
 		/// Upon calling this, the writer is notified that this commit 
 		/// point should be deleted. 
-		/// <p>
+		/// <p/>
 		/// Decision that a commit-point should be deleted is taken by the {@link IndexDeletionPolicy} in effect
 		/// and therefore this should only be called by its {@link IndexDeletionPolicy#onInit onInit()} or 
 		/// {@link IndexDeletionPolicy#onCommit onCommit()} methods.

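Each commit point described above can be opened as a point-in-time view of the index. A hedged sketch (illustration only, not part of this patch; IndexReader.ListCommits(Directory) and IndexReader.Open(IndexCommit, bool) follow the 2.9 Java API, and the port's exact signatures and collection types may differ):

    using System;
    using System.Collections;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    public class PointInTimeDemo
    {
        public static void OpenEachCommit(Directory dir)
        {
            foreach (IndexCommit commit in (IEnumerable) IndexReader.ListCommits(dir))
            {
                // Open a read-only reader on exactly the segments file of this commit.
                IndexReader reader = IndexReader.Open(commit, true);
                Console.WriteLine(commit.GetSegmentsFileName() + ": " + reader.NumDocs() + " docs");
                reader.Close();
            }
        }
    }
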
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommitPoint.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexCommitPoint.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommitPoint.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommitPoint.cs Mon Dec 14 14:13:03 2009
@@ -35,10 +35,10 @@
         System.Collections.Generic.ICollection<string> GetFileNames();
 		
 		/// <summary> Delete this commit point.
-		/// <p>
+		/// <p/>
 		/// Upon calling this, the writer is notified that this commit 
 		/// point should be deleted. 
-		/// <p>
+		/// <p/>
 		/// Decision that a commit-point should be deleted is taken by the {@link IndexDeletionPolicy} in effect
 		/// and therefore this should only be called by its {@link IndexDeletionPolicy#onInit onInit()} or 
 		/// {@link IndexDeletionPolicy#onCommit onCommit()} methods.

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexDeletionPolicy.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs Mon Dec 14 14:13:03 2009
@@ -20,9 +20,9 @@
 namespace Lucene.Net.Index
 {
 	
-	/// <summary> <p>Expert: policy for deletion of stale {@link IndexCommit index commits}. 
+	/// <summary> <p/>Expert: policy for deletion of stale {@link IndexCommit index commits}. 
 	/// 
-	/// <p>Implement this interface, and pass it to one
+	/// <p/>Implement this interface, and pass it to one
 	/// of the {@link IndexWriter} or {@link IndexReader}
 	/// constructors, to customize when older
 	/// {@link IndexCommit point-in-time commits}
@@ -31,7 +31,7 @@
 	/// removes old commits as soon as a new commit is done (this
 	/// matches the behavior before 2.2).</p>
 	/// 
-	/// <p>One expected use case for this (and the reason why it
+	/// <p/>One expected use case for this (and the reason why it
 	/// was first created) is to work around problems with an
 	/// index directory accessed via filesystems like NFS because
 	/// NFS does not provide the "delete on last close" semantics
@@ -50,17 +50,17 @@
 	public interface IndexDeletionPolicy
 	{
 		
-		/// <summary> <p>This is called once when a writer is first
+		/// <summary> <p/>This is called once when a writer is first
 		/// instantiated to give the policy a chance to remove old
 		/// commit points.</p>
 		/// 
-		/// <p>The writer locates all index commits present in the 
+		/// <p/>The writer locates all index commits present in the 
 		/// index directory and calls this method.  The policy may 
 		/// choose to delete some of the commit points, doing so by
 		/// calling method {@link IndexCommit#delete delete()} 
 		/// of {@link IndexCommit}.</p>
 		/// 
-		/// <p><u>Note:</u> the last CommitPoint is the most recent one,
+		/// <p/><u>Note:</u> the last CommitPoint is the most recent one,
 		/// i.e. the "front index state". Be careful not to delete it,
 		/// unless you know for sure what you are doing, and unless 
 		/// you can afford to lose the index content while doing that. 
@@ -72,15 +72,15 @@
 		/// </param>
 		void  OnInit(System.Collections.IList commits);
 		
-		/// <summary> <p>This is called each time the writer completed a commit.
+		/// <summary> <p/>This is called each time the writer completed a commit.
 		/// This gives the policy a chance to remove old commit points
 		/// with each commit.</p>
 		/// 
-		/// <p>The policy may now choose to delete old commit points 
+		/// <p/>The policy may now choose to delete old commit points 
 		/// by calling method {@link IndexCommit#delete delete()} 
 		/// of {@link IndexCommit}.</p>
 		/// 
-		/// <p>If writer has <code>autoCommit = true</code> then
+		/// <p/>If writer has <code>autoCommit = true</code> then
 		/// this method will in general be called many times during
 		/// one instance of {@link IndexWriter}.  If
 		/// <code>autoCommit = false</code> then this method is
@@ -88,7 +88,7 @@
 		/// called, or not at all if the {@link IndexWriter#abort}
 		/// is called. 
 		/// 
-		/// <p><u>Note:</u> the last CommitPoint is the most recent one,
+		/// <p/><u>Note:</u> the last CommitPoint is the most recent one,
 		/// i.e. the "front index state". Be careful not to delete it,
 		/// unless you know for sure what you are doing, and unless 
 		/// you can afford to lose the index content while doing that.

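A hedged sketch of a custom policy that keeps only the most recent commit, i.e. the default behavior the summary mentions (illustration only, not part of this patch; OnInit's signature is taken from this diff, OnCommit is assumed analogous, and the list entries are assumed to be IndexCommit instances as in 2.9):

    using System.Collections;
    using Lucene.Net.Index;

    public class KeepNewestOnlyPolicy : IndexDeletionPolicy
    {
        // Commits arrive oldest-first, so the last entry is the "front index state"
        // warned about above and must never be deleted.
        public void OnInit(IList commits)
        {
            DeleteAllButNewest(commits);
        }

        public void OnCommit(IList commits)
        {
            DeleteAllButNewest(commits);
        }

        private static void DeleteAllButNewest(IList commits)
        {
            for (int i = 0; i < commits.Count - 1; i++)
            {
                ((IndexCommit) commits[i]).Delete();
            }
        }
    }
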
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexModifier.cs?rev=890338&r1=890337&r2=890338&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs Mon Dec 14 14:13:03 2009
@@ -26,7 +26,7 @@
 namespace Lucene.Net.Index
 {
 	
-	/// <summary> <p>[Note that as of <b>2.1</b>, all but one of the
+	/// <summary> <p/>[Note that as of <b>2.1</b>, all but one of the
 	/// methods in this class are available via {@link
 	/// IndexWriter}.  The one method that is not available is
 	/// {@link #DeleteDocument(int)}.]</p>
@@ -36,10 +36,10 @@
 	/// do not need to care about implementation details such as that adding
 	/// documents is done via IndexWriter and deletion is done via IndexReader.
 	/// 
-	/// <p>Note that you cannot create more than one <code>IndexModifier</code> object
+	/// <p/>Note that you cannot create more than one <code>IndexModifier</code> object
 	/// on the same directory at the same time.
 	/// 
-	/// <p>Example usage:
+	/// <p/>Example usage:
 	/// 
 	/// <!-- ======================================================== -->
 	/// <!-- = Java Sourcecode to HTML automatically converted code = -->
@@ -71,16 +71,16 @@
 	/// <!-- =       END of automatically generated HTML code       = -->
 	/// <!-- ======================================================== -->
 	/// 
-	/// <p>Not all methods of IndexReader and IndexWriter are offered by this
+	/// <p/>Not all methods of IndexReader and IndexWriter are offered by this
 	/// class. If you need access to additional methods, either use those classes
 	/// directly or implement your own class that extends <code>IndexModifier</code>.
 	/// 
-	/// <p>Although an instance of this class can be used from more than one
+	/// <p/>Although an instance of this class can be used from more than one
 	/// thread, you will not get the best performance. You might want to use
 	/// IndexReader and IndexWriter directly for that (but you will need to
 	/// care about synchronization yourself then).
 	/// 
-	/// <p>While you can freely mix calls to add() and delete() using this class,
+	/// <p/>While you can freely mix calls to add() and delete() using this class,
 	/// you should batch you calls for best performance. For example, if you
 	/// want to update 20 documents, you should first delete all those documents,
 	/// then add all the new documents.
@@ -428,7 +428,7 @@
 		
 		/// <summary> If non-null, information about merges and a message when
 		/// {@link #GetMaxFieldLength()} is reached will be printed to this.
-		/// <p>Example: <tt>index.setInfoStream(System.err);</tt>
+		/// <p/>Example: <tt>index.setInfoStream(System.err);</tt>
 		/// </summary>
 		/// <seealso cref="IndexWriter.SetInfoStream(PrintStream)">
 		/// </seealso>
@@ -553,7 +553,7 @@
 		/// large value gives faster indexing.  At the same time, mergeFactor limits
 		/// the number of files open in a FSDirectory.
 		/// 
-		/// <p>The default value is 10.
+		/// <p/>The default value is 10.
 		/// 
 		/// </summary>
 		/// <seealso cref="IndexWriter.SetMaxBufferedDocs(int)">
@@ -598,7 +598,7 @@
 		/// indices are slower, indexing is faster.  Thus larger values (&gt; 10) are best
 		/// for batch index creation, and smaller values (&lt; 10) for indices that are
 		/// interactively maintained.
-		/// <p>This must never be less than 2.  The default value is 10.
+		/// <p/>This must never be less than 2.  The default value is 10.
 		/// 
 		/// </summary>
 		/// <seealso cref="IndexWriter.SetMergeFactor(int)">
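A hedged C# sketch of the batched delete-then-add usage the summary above recommends (illustration only, not part of this patch; it assumes the port mirrors the Java IndexModifier API with the (Directory, Analyzer, bool) constructor, DeleteDocuments(Term), AddDocument(Document) and Close()):

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    public class IndexModifierDemo
    {
        public static void UpdateDocs(Directory dir, string[] ids, Document[] newDocs)
        {
            IndexModifier modifier = new IndexModifier(dir, new StandardAnalyzer(), false);

            // Batch all deletes first ...
            foreach (string id in ids)
            {
                modifier.DeleteDocuments(new Term("id", id));
            }

            // ... then all adds, so the modifier switches only once between its internal
            // IndexReader (deletes) and IndexWriter (adds).
            foreach (Document doc in newDocs)
            {
                modifier.AddDocument(doc);
            }

            modifier.Close();
        }
    }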