You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by cc...@apache.org on 2011/11/06 06:24:44 UTC

[Lucene.Net] svn commit: r1198132 [1/17] - in /incubator/lucene.net/trunk/src: contrib/Analyzers/AR/ contrib/Analyzers/BR/ contrib/Analyzers/CJK/ contrib/Analyzers/Cz/ contrib/Analyzers/De/ contrib/Analyzers/Fr/ contrib/Analyzers/Miscellaneous/ contrib/Analyzers/NG...

Author: ccurrens
Date: Sun Nov  6 05:24:26 2011
New Revision: 1198132

URL: http://svn.apache.org/viewvc?rev=1198132&view=rev
Log:
[LUCENENET-438] Converted remaining javadoc notation comments to .net xml comments.  (core and contrib code files)

Modified:
    incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/De/GermanStemmer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Payloads/PayloadHelper.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/Codec/OneDimensionalNonWeightedTokenSettingsCodec.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/Codec/SimpleThreeDimensionalTokenSettingsCodec.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/Codec/TokenSettingsCodec.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/Codec/TwoDimensionalNonWeightedSynonymTokenSettingsCodec.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/Matrix/Column.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/Matrix/Matrix.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/Matrix/MatrixPermutationIterator.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/Matrix/Row.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleMatrixFilter.cs
    incubator/lucene.net/trunk/src/contrib/Analyzers/Shingle/TokenPositioner.cs
    incubator/lucene.net/trunk/src/contrib/Core/Index/FieldEnumerator.cs
    incubator/lucene.net/trunk/src/contrib/Core/Index/SegmentsGenCommit.cs
    incubator/lucene.net/trunk/src/contrib/Core/Index/TermVectorEnumerator.cs
    incubator/lucene.net/trunk/src/contrib/Core/Util/Cache/SegmentCache.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/BaseFragmentsBuilder.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/FastVectorHighlighter.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldFragList.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldPhraseList.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldQuery.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldTermStack.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/FragListBuilder.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/FragmentsBuilder.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/ScoreOrderFragmentsBuilder.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/SimpleFragListBuilder.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/SimpleFragmentsBuilder.cs
    incubator/lucene.net/trunk/src/contrib/FastVectorHighlighter/VectorHighlightMapper.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/DefaultEncoder.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/Fragmenter.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/GradientFormatter.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/Highlighter.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/NullFragmenter.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/QueryScorer.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/QueryTermExtractor.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/Scorer.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/SimpleFragmenter.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/SimpleHTMLEncoder.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/SimpleHTMLFormatter.cs
    incubator/lucene.net/trunk/src/contrib/Highlighter/TokenSources.cs
    incubator/lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs
    incubator/lucene.net/trunk/src/contrib/Queries/BoostingQuery.cs
    incubator/lucene.net/trunk/src/contrib/Queries/DuplicateFilter.cs
    incubator/lucene.net/trunk/src/contrib/Queries/FilterClause.cs
    incubator/lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs
    incubator/lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs
    incubator/lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThisQuery.cs
    incubator/lucene.net/trunk/src/contrib/Queries/Similar/SimilarityQueries.cs
    incubator/lucene.net/trunk/src/contrib/Queries/TermsFilter.cs
    incubator/lucene.net/trunk/src/contrib/Regex/IRegexCapabilities.cs
    incubator/lucene.net/trunk/src/contrib/Regex/Properties/AssemblyInfo.cs
    incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs
    incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs
    incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs
    incubator/lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs
    incubator/lucene.net/trunk/src/contrib/Spatial/Geometry/LatLng.cs
    incubator/lucene.net/trunk/src/contrib/Spatial/Tier/Projectors/CartesianTierPlotter.cs
    incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/JaroWinklerDistance.cs
    incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LevenshteinDistance.cs
    incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/NGramDistance.cs
    incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs
    incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs
    incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/StringDistance.cs
    incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs
    incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs
    incubator/lucene.net/trunk/src/core/Analysis/ASCIIFoldingFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/Analyzer.cs
    incubator/lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/CachingTokenFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/CharArraySet.cs
    incubator/lucene.net/trunk/src/core/Analysis/CharFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/CharReader.cs
    incubator/lucene.net/trunk/src/core/Analysis/CharStream.cs
    incubator/lucene.net/trunk/src/core/Analysis/ISOLatin1AccentFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/LetterTokenizer.cs
    incubator/lucene.net/trunk/src/core/Analysis/LowerCaseTokenizer.cs
    incubator/lucene.net/trunk/src/core/Analysis/MappingCharFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/NormalizeCharMap.cs
    incubator/lucene.net/trunk/src/core/Analysis/NumericTokenStream.cs
    incubator/lucene.net/trunk/src/core/Analysis/PerFieldAnalyzerWrapper.cs
    incubator/lucene.net/trunk/src/core/Analysis/PorterStemFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs
    incubator/lucene.net/trunk/src/core/Analysis/SinkTokenizer.cs
    incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs
    incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs
    incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs
    incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs
    incubator/lucene.net/trunk/src/core/Analysis/StopFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/TeeTokenFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/Token.cs
    incubator/lucene.net/trunk/src/core/Analysis/TokenFilter.cs
    incubator/lucene.net/trunk/src/core/Analysis/TokenStream.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttribute.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/FlagsAttributeImpl.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PayloadAttributeImpl.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TermAttribute.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TermAttributeImpl.cs
    incubator/lucene.net/trunk/src/core/Analysis/Tokenizer.cs
    incubator/lucene.net/trunk/src/core/Analysis/WhitespaceAnalyzer.cs
    incubator/lucene.net/trunk/src/core/Analysis/WhitespaceTokenizer.cs
    incubator/lucene.net/trunk/src/core/Analysis/WordlistLoader.cs
    incubator/lucene.net/trunk/src/core/Document/AbstractField.cs
    incubator/lucene.net/trunk/src/core/Document/CompressionTools.cs
    incubator/lucene.net/trunk/src/core/Document/DateField.cs
    incubator/lucene.net/trunk/src/core/Document/DateTools.cs
    incubator/lucene.net/trunk/src/core/Document/Document.cs
    incubator/lucene.net/trunk/src/core/Document/Field.cs
    incubator/lucene.net/trunk/src/core/Document/FieldSelector.cs
    incubator/lucene.net/trunk/src/core/Document/FieldSelectorResult.cs
    incubator/lucene.net/trunk/src/core/Document/Fieldable.cs
    incubator/lucene.net/trunk/src/core/Document/LoadFirstFieldSelector.cs
    incubator/lucene.net/trunk/src/core/Document/MapFieldSelector.cs
    incubator/lucene.net/trunk/src/core/Document/NumberTools.cs
    incubator/lucene.net/trunk/src/core/Document/NumericField.cs
    incubator/lucene.net/trunk/src/core/Document/SetBasedFieldSelector.cs
    incubator/lucene.net/trunk/src/core/Index/AbstractAllTermDocs.cs
    incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs
    incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs
    incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs
    incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs
    incubator/lucene.net/trunk/src/core/Index/DefaultSkipListReader.cs
    incubator/lucene.net/trunk/src/core/Index/DirectoryOwningReader.cs
    incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs
    incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs
    incubator/lucene.net/trunk/src/core/Index/FieldInfos.cs
    incubator/lucene.net/trunk/src/core/Index/FieldInvertState.cs
    incubator/lucene.net/trunk/src/core/Index/FieldReaderException.cs
    incubator/lucene.net/trunk/src/core/Index/FieldSortedTermVectorMapper.cs
    incubator/lucene.net/trunk/src/core/Index/FieldsReader.cs
    incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs
    incubator/lucene.net/trunk/src/core/Index/FormatPostingsDocsWriter.cs
    incubator/lucene.net/trunk/src/core/Index/IndexCommit.cs
    incubator/lucene.net/trunk/src/core/Index/IndexCommitPoint.cs
    incubator/lucene.net/trunk/src/core/Index/IndexDeletionPolicy.cs
    incubator/lucene.net/trunk/src/core/Index/IndexFileDeleter.cs
    incubator/lucene.net/trunk/src/core/Index/IndexFileNameFilter.cs
    incubator/lucene.net/trunk/src/core/Index/IndexFileNames.cs
    incubator/lucene.net/trunk/src/core/Index/IndexModifier.cs
    incubator/lucene.net/trunk/src/core/Index/IndexReader.cs
    incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs
    incubator/lucene.net/trunk/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
    incubator/lucene.net/trunk/src/core/Index/LogByteSizeMergePolicy.cs
    incubator/lucene.net/trunk/src/core/Index/LogDocMergePolicy.cs
    incubator/lucene.net/trunk/src/core/Index/LogMergePolicy.cs
    incubator/lucene.net/trunk/src/core/Index/MergePolicy.cs
    incubator/lucene.net/trunk/src/core/Index/MergeScheduler.cs
    incubator/lucene.net/trunk/src/core/Index/MultiLevelSkipListReader.cs
    incubator/lucene.net/trunk/src/core/Index/MultiReader.cs
    incubator/lucene.net/trunk/src/core/Index/MultipleTermPositions.cs
    incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs
    incubator/lucene.net/trunk/src/core/Index/Payload.cs
    incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs
    incubator/lucene.net/trunk/src/core/Index/RawPostingList.cs
    incubator/lucene.net/trunk/src/core/Index/SegmentInfos.cs
    incubator/lucene.net/trunk/src/core/Index/SegmentMerger.cs
    incubator/lucene.net/trunk/src/core/Index/SegmentTermPositionVector.cs
    incubator/lucene.net/trunk/src/core/Index/SerialMergeScheduler.cs
    incubator/lucene.net/trunk/src/core/Index/SnapshotDeletionPolicy.cs
    incubator/lucene.net/trunk/src/core/Index/SortedTermVectorMapper.cs
    incubator/lucene.net/trunk/src/core/Index/StaleReaderException.cs
    incubator/lucene.net/trunk/src/core/Index/TermBuffer.cs
    incubator/lucene.net/trunk/src/core/Index/TermDocs.cs
    incubator/lucene.net/trunk/src/core/Index/TermEnum.cs
    incubator/lucene.net/trunk/src/core/Index/TermFreqVector.cs
    incubator/lucene.net/trunk/src/core/Index/TermInfosWriter.cs
    incubator/lucene.net/trunk/src/core/Index/TermPositionVector.cs
    incubator/lucene.net/trunk/src/core/Index/TermPositions.cs
    incubator/lucene.net/trunk/src/core/Index/TermVectorEntryFreqSortedComparator.cs
    incubator/lucene.net/trunk/src/core/Index/TermVectorMapper.cs
    incubator/lucene.net/trunk/src/core/Index/TermVectorOffsetInfo.cs
    incubator/lucene.net/trunk/src/core/Index/TermVectorsReader.cs
    incubator/lucene.net/trunk/src/core/Index/TermsHash.cs
    incubator/lucene.net/trunk/src/core/Index/TermsHashConsumerPerField.cs
    incubator/lucene.net/trunk/src/core/LucenePackage.cs
    incubator/lucene.net/trunk/src/core/Messages/NLS.cs
    incubator/lucene.net/trunk/src/core/QueryParser/CharStream.cs
    incubator/lucene.net/trunk/src/core/QueryParser/FastCharStream.cs
    incubator/lucene.net/trunk/src/core/QueryParser/MultiFieldQueryParser.cs
    incubator/lucene.net/trunk/src/core/QueryParser/QueryParser.cs
    incubator/lucene.net/trunk/src/core/Search/BooleanClause.cs
    incubator/lucene.net/trunk/src/core/Search/BooleanQuery.cs
    incubator/lucene.net/trunk/src/core/Search/BooleanScorer.cs
    incubator/lucene.net/trunk/src/core/Search/BooleanScorer2.cs
    incubator/lucene.net/trunk/src/core/Search/CachingSpanFilter.cs
    incubator/lucene.net/trunk/src/core/Search/CachingWrapperFilter.cs
    incubator/lucene.net/trunk/src/core/Search/Collector.cs
    incubator/lucene.net/trunk/src/core/Search/ComplexExplanation.cs
    incubator/lucene.net/trunk/src/core/Search/ConjunctionScorer.cs
    incubator/lucene.net/trunk/src/core/Search/ConstantScoreQuery.cs
    incubator/lucene.net/trunk/src/core/Search/ConstantScoreRangeQuery.cs
    incubator/lucene.net/trunk/src/core/Search/DefaultSimilarity.cs
    incubator/lucene.net/trunk/src/core/Search/DisjunctionMaxScorer.cs
    incubator/lucene.net/trunk/src/core/Search/DisjunctionSumScorer.cs
    incubator/lucene.net/trunk/src/core/Search/DocIdSet.cs
    incubator/lucene.net/trunk/src/core/Search/DocIdSetIterator.cs
    incubator/lucene.net/trunk/src/core/Search/Explanation.cs
    incubator/lucene.net/trunk/src/core/Search/ExtendedFieldCache.cs
    incubator/lucene.net/trunk/src/core/Search/FieldCache.cs
    incubator/lucene.net/trunk/src/core/Search/FieldCacheImpl.cs
    incubator/lucene.net/trunk/src/core/Search/FieldCacheRangeFilter.cs
    incubator/lucene.net/trunk/src/core/Search/FieldCacheTermsFilter.cs
    incubator/lucene.net/trunk/src/core/Search/FieldComparator.cs
    incubator/lucene.net/trunk/src/core/Search/FieldComparatorSource.cs
    incubator/lucene.net/trunk/src/core/Search/FieldDoc.cs
    incubator/lucene.net/trunk/src/core/Search/FieldDocSortedHitQueue.cs
    incubator/lucene.net/trunk/src/core/Search/FieldSortedHitQueue.cs
    incubator/lucene.net/trunk/src/core/Search/FieldValueHitQueue.cs
    incubator/lucene.net/trunk/src/core/Search/Filter.cs
    incubator/lucene.net/trunk/src/core/Search/FilteredDocIdSet.cs
    incubator/lucene.net/trunk/src/core/Search/FilteredDocIdSetIterator.cs
    incubator/lucene.net/trunk/src/core/Search/FilteredQuery.cs
    incubator/lucene.net/trunk/src/core/Search/FilteredTermEnum.cs
    incubator/lucene.net/trunk/src/core/Search/Function/ByteFieldSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/CustomScoreProvider.cs
    incubator/lucene.net/trunk/src/core/Search/Function/CustomScoreQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Function/DocValues.cs
    incubator/lucene.net/trunk/src/core/Search/Function/FieldCacheSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/FieldScoreQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Function/FloatFieldSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/IntFieldSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/MultiValueSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/OrdFieldSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/ReverseOrdFieldSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/ShortFieldSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/ValueSource.cs
    incubator/lucene.net/trunk/src/core/Search/Function/ValueSourceQuery.cs
    incubator/lucene.net/trunk/src/core/Search/FuzzyQuery.cs
    incubator/lucene.net/trunk/src/core/Search/FuzzyTermEnum.cs
    incubator/lucene.net/trunk/src/core/Search/Hit.cs
    incubator/lucene.net/trunk/src/core/Search/HitCollector.cs
    incubator/lucene.net/trunk/src/core/Search/HitCollectorWrapper.cs
    incubator/lucene.net/trunk/src/core/Search/HitIterator.cs
    incubator/lucene.net/trunk/src/core/Search/HitQueue.cs
    incubator/lucene.net/trunk/src/core/Search/Hits.cs
    incubator/lucene.net/trunk/src/core/Search/IndexSearcher.cs
    incubator/lucene.net/trunk/src/core/Search/MatchAllDocsQuery.cs
    incubator/lucene.net/trunk/src/core/Search/MultiPhraseQuery.cs
    incubator/lucene.net/trunk/src/core/Search/MultiSearcher.cs
    incubator/lucene.net/trunk/src/core/Search/MultiTermQuery.cs
    incubator/lucene.net/trunk/src/core/Search/MultiTermQueryWrapperFilter.cs
    incubator/lucene.net/trunk/src/core/Search/NumericRangeFilter.cs
    incubator/lucene.net/trunk/src/core/Search/NumericRangeQuery.cs
    incubator/lucene.net/trunk/src/core/Search/ParallelMultiSearcher.cs
    incubator/lucene.net/trunk/src/core/Search/Payloads/BoostingTermQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Payloads/PayloadNearQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Payloads/PayloadTermQuery.cs
    incubator/lucene.net/trunk/src/core/Search/PhrasePositions.cs
    incubator/lucene.net/trunk/src/core/Search/PhraseQuery.cs
    incubator/lucene.net/trunk/src/core/Search/PhraseScorer.cs
    incubator/lucene.net/trunk/src/core/Search/PositiveScoresOnlyCollector.cs
    incubator/lucene.net/trunk/src/core/Search/PrefixQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Query.cs
    incubator/lucene.net/trunk/src/core/Search/QueryFilter.cs
    incubator/lucene.net/trunk/src/core/Search/QueryWrapperFilter.cs
    incubator/lucene.net/trunk/src/core/Search/RangeFilter.cs
    incubator/lucene.net/trunk/src/core/Search/RangeQuery.cs
    incubator/lucene.net/trunk/src/core/Search/ReqExclScorer.cs
    incubator/lucene.net/trunk/src/core/Search/ReqOptSumScorer.cs
    incubator/lucene.net/trunk/src/core/Search/ScoreCachingWrappingScorer.cs
    incubator/lucene.net/trunk/src/core/Search/ScoreDocComparator.cs
    incubator/lucene.net/trunk/src/core/Search/Scorer.cs
    incubator/lucene.net/trunk/src/core/Search/Searchable.cs
    incubator/lucene.net/trunk/src/core/Search/Searcher.cs
    incubator/lucene.net/trunk/src/core/Search/Similarity.cs
    incubator/lucene.net/trunk/src/core/Search/SimilarityDelegator.cs
    incubator/lucene.net/trunk/src/core/Search/Sort.cs
    incubator/lucene.net/trunk/src/core/Search/SortComparator.cs
    incubator/lucene.net/trunk/src/core/Search/SortComparatorSource.cs
    incubator/lucene.net/trunk/src/core/Search/SortField.cs
    incubator/lucene.net/trunk/src/core/Search/SpanFilter.cs
    incubator/lucene.net/trunk/src/core/Search/SpanFilterResult.cs
    incubator/lucene.net/trunk/src/core/Search/SpanQueryFilter.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/FieldMaskingSpanQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/NearSpansOrdered.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/NearSpansUnordered.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/SpanFirstQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/SpanNearQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/SpanNotQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/SpanOrQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/SpanQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/SpanScorer.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/SpanTermQuery.cs
    incubator/lucene.net/trunk/src/core/Search/Spans/Spans.cs
    incubator/lucene.net/trunk/src/core/Search/TermQuery.cs
    incubator/lucene.net/trunk/src/core/Search/TermRangeFilter.cs
    incubator/lucene.net/trunk/src/core/Search/TermRangeQuery.cs
    incubator/lucene.net/trunk/src/core/Search/TermRangeTermEnum.cs
    incubator/lucene.net/trunk/src/core/Search/TermScorer.cs
    incubator/lucene.net/trunk/src/core/Search/TimeLimitedCollector.cs
    incubator/lucene.net/trunk/src/core/Search/TimeLimitingCollector.cs
    incubator/lucene.net/trunk/src/core/Search/TopDocCollector.cs
    incubator/lucene.net/trunk/src/core/Search/TopDocs.cs
    incubator/lucene.net/trunk/src/core/Search/TopDocsCollector.cs
    incubator/lucene.net/trunk/src/core/Search/TopFieldCollector.cs
    incubator/lucene.net/trunk/src/core/Search/TopFieldDocCollector.cs
    incubator/lucene.net/trunk/src/core/Search/TopFieldDocs.cs
    incubator/lucene.net/trunk/src/core/Search/TopScoreDocCollector.cs
    incubator/lucene.net/trunk/src/core/Search/Weight.cs
    incubator/lucene.net/trunk/src/core/Search/WildcardQuery.cs
    incubator/lucene.net/trunk/src/core/Search/WildcardTermEnum.cs
    incubator/lucene.net/trunk/src/core/Store/BufferedIndexInput.cs
    incubator/lucene.net/trunk/src/core/Store/BufferedIndexOutput.cs
    incubator/lucene.net/trunk/src/core/Store/CheckSumIndexOutput.cs
    incubator/lucene.net/trunk/src/core/Store/Directory.cs
    incubator/lucene.net/trunk/src/core/Store/FSDirectory.cs
    incubator/lucene.net/trunk/src/core/Store/FSLockFactory.cs
    incubator/lucene.net/trunk/src/core/Store/IndexInput.cs
    incubator/lucene.net/trunk/src/core/Store/IndexOutput.cs
    incubator/lucene.net/trunk/src/core/Store/Lock.cs
    incubator/lucene.net/trunk/src/core/Store/LockFactory.cs
    incubator/lucene.net/trunk/src/core/Store/LockObtainFailedException.cs
    incubator/lucene.net/trunk/src/core/Store/LockReleaseFailedException.cs
    incubator/lucene.net/trunk/src/core/Store/LockVerifyServer.cs
    incubator/lucene.net/trunk/src/core/Store/MMapDirectory.cs
    incubator/lucene.net/trunk/src/core/Store/NIOFSDirectory.cs
    incubator/lucene.net/trunk/src/core/Store/NativeFSLockFactory.cs
    incubator/lucene.net/trunk/src/core/Store/NoLockFactory.cs
    incubator/lucene.net/trunk/src/core/Store/RAMDirectory.cs
    incubator/lucene.net/trunk/src/core/Store/RAMInputStream.cs
    incubator/lucene.net/trunk/src/core/Store/RAMOutputStream.cs
    incubator/lucene.net/trunk/src/core/Store/SimpleFSDirectory.cs
    incubator/lucene.net/trunk/src/core/Store/SimpleFSLockFactory.cs
    incubator/lucene.net/trunk/src/core/Store/SingleInstanceLockFactory.cs
    incubator/lucene.net/trunk/src/core/Store/VerifyingLockFactory.cs
    incubator/lucene.net/trunk/src/core/SupportClass.cs
    incubator/lucene.net/trunk/src/core/Util/ArrayUtil.cs
    incubator/lucene.net/trunk/src/core/Util/AttributeImpl.cs
    incubator/lucene.net/trunk/src/core/Util/AttributeSource.cs
    incubator/lucene.net/trunk/src/core/Util/AverageGuessMemoryModel.cs
    incubator/lucene.net/trunk/src/core/Util/BitVector.cs
    incubator/lucene.net/trunk/src/core/Util/Cache/Cache.cs
    incubator/lucene.net/trunk/src/core/Util/Cache/SimpleLRUCache.cs
    incubator/lucene.net/trunk/src/core/Util/Cache/SimpleMapCache.cs
    incubator/lucene.net/trunk/src/core/Util/CloseableThreadLocal.cs
    incubator/lucene.net/trunk/src/core/Util/DocIdBitSet.cs
    incubator/lucene.net/trunk/src/core/Util/FieldCacheSanityChecker.cs
    incubator/lucene.net/trunk/src/core/Util/IndexableBinaryStringTools.cs
    incubator/lucene.net/trunk/src/core/Util/NumericUtils.cs
    incubator/lucene.net/trunk/src/core/Util/OpenBitSet.cs
    incubator/lucene.net/trunk/src/core/Util/OpenBitSetIterator.cs
    incubator/lucene.net/trunk/src/core/Util/PriorityQueue.cs
    incubator/lucene.net/trunk/src/core/Util/ReaderUtil.cs
    incubator/lucene.net/trunk/src/core/Util/ScorerDocQueue.cs
    incubator/lucene.net/trunk/src/core/Util/SortedVIntList.cs
    incubator/lucene.net/trunk/src/core/Util/StringHelper.cs
    incubator/lucene.net/trunk/src/core/Util/ToStringUtils.cs
    incubator/lucene.net/trunk/src/core/Util/Version.cs

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -24,19 +24,19 @@ using Lucene.Net.Util;
 namespace Lucene.Net.Analysis.AR
 {
     /**
-     * {@link Analyzer} for Arabic. 
-     * <p>
+     * <see cref="Analyzer"/> for Arabic. 
+     * <p/>
      * This analyzer implements light-stemming as specified by:
      * <i>
      * Light Stemming for Arabic Information Retrieval
      * </i>    
      * http://www.mtholyoke.edu/~lballest/Pubs/arab_stem05.pdf
-     * <p>
+     * <p/>
      * The analysis package contains three primary components:
      * <ul>
-     *  <li>{@link ArabicNormalizationFilter}: Arabic orthographic normalization.
-     *  <li>{@link ArabicStemFilter}: Arabic light stemming
-     *  <li>Arabic stop words file: a set of default Arabic stop words.
+     *  <li><see cref="ArabicNormalizationFilter"/>: Arabic orthographic normalization.</li>
+     *  <li><see cref="ArabicStemFilter"/>: Arabic light stemming</li>
+     *  <li>Arabic stop words file: a set of default Arabic stop words.</li>
      * </ul>
      * 
      */
@@ -63,9 +63,9 @@ namespace Lucene.Net.Analysis.AR
         private Version matchVersion;
 
         /**
-         * Builds an analyzer with the default stop words: {@link #DEFAULT_STOPWORD_FILE}.
+         * Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
          *
-         * @deprecated Use {@link #ArabicAnalyzer(Version)} instead
+         * @deprecated Use <see cref="ArabicAnalyzer(Version)"/> instead
          */
         public ArabicAnalyzer() : this(Version.LUCENE_24)
         {
@@ -73,7 +73,7 @@ namespace Lucene.Net.Analysis.AR
         }
 
         /**
-         * Builds an analyzer with the default stop words: {@link #DEFAULT_STOPWORD_FILE}.
+         * Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
          */
         public ArabicAnalyzer(Version matchVersion)
         {
@@ -92,7 +92,7 @@ namespace Lucene.Net.Analysis.AR
         /**
          * Builds an analyzer with the given stop words.
          *
-         * @deprecated Use {@link #ArabicAnalyzer(Version, String[])} instead
+         * @deprecated Use <see cref="ArabicAnalyzer(Lucene.Net.Util.Version, string[])"/> instead
          */
         public ArabicAnalyzer(string[] stopwords): this(Version.LUCENE_24, stopwords)
         {
@@ -110,7 +110,7 @@ namespace Lucene.Net.Analysis.AR
         /**
          * Builds an analyzer with the given stop words.
          *
-         * @deprecated Use {@link #ArabicAnalyzer(Version, Hashtable)} instead
+         * @deprecated Use <see cref="ArabicAnalyzer(Version, Hashtable)"/> instead
          */
         public ArabicAnalyzer(Hashtable stopwords) : this(Version.LUCENE_24, stopwords)
         {
@@ -127,9 +127,9 @@ namespace Lucene.Net.Analysis.AR
 
         //DIGY
         ///**
-        // * Builds an analyzer with the given stop words.  Lines can be commented out using {@link #STOPWORDS_COMMENT}
+        // * Builds an analyzer with the given stop words.  Lines can be commented out using <see cref="STOPWORDS_COMMENT"/>
         // *
-        // * @deprecated Use {@link #ArabicAnalyzer(Version, File)} instead
+        // * @deprecated Use <see cref="ArabicAnalyzer(Version, File)"/> instead
         // */
         //public ArabicAnalyzer(File stopwords)
         //{
@@ -137,7 +137,7 @@ namespace Lucene.Net.Analysis.AR
         //}
 
         ///**
-        // * Builds an analyzer with the given stop words.  Lines can be commented out using {@link #STOPWORDS_COMMENT}
+        // * Builds an analyzer with the given stop words.  Lines can be commented out using <see cref="STOPWORDS_COMMENT"/>
         // */
         //public ArabicAnalyzer(Version matchVersion, File stopwords)
         //{
@@ -147,11 +147,11 @@ namespace Lucene.Net.Analysis.AR
 
 
         /**
-         * Creates a {@link TokenStream} which tokenizes all the text in the provided {@link Reader}.
+         * Creates a <see cref="TokenStream"/> which tokenizes all the text in the provided <see cref="TextReader"/>.
          *
-         * @return  A {@link TokenStream} built from an {@link ArabicLetterTokenizer} filtered with
-         * 			{@link LowerCaseFilter}, {@link StopFilter}, {@link ArabicNormalizationFilter}
-         *            and {@link ArabicStemFilter}.
+         * <returns>A <see cref="TokenStream"/> built from an <see cref="ArabicLetterTokenizer"/> filtered with
+         * 			<see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>, <see cref="ArabicNormalizationFilter"/>
+         *            and <see cref="ArabicStemFilter"/>.</returns>
          */
         public override TokenStream TokenStream(string fieldName, TextReader reader)
         {
@@ -171,12 +171,12 @@ namespace Lucene.Net.Analysis.AR
         };
 
         /**
-         * Returns a (possibly reused) {@link TokenStream} which tokenizes all the text 
-         * in the provided {@link Reader}.
+         * Returns a (possibly reused) <see cref="TokenStream"/> which tokenizes all the text 
+         * in the provided <see cref="TextReader"/>.
          *
-         * @return  A {@link TokenStream} built from an {@link ArabicLetterTokenizer} filtered with
-         *            {@link LowerCaseFilter}, {@link StopFilter}, {@link ArabicNormalizationFilter}
-         *            and {@link ArabicStemFilter}.
+         * <returns>A <see cref="TokenStream"/> built from an <see cref="ArabicLetterTokenizer"/> filtered with
+         *            <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>, <see cref="ArabicNormalizationFilter"/>
+         *            and <see cref="ArabicStemFilter"/>.</returns>
          */
         public override TokenStream ReusableTokenStream(string fieldName, TextReader reader)
         {

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.AR
 
         /** 
          * Allows for Letter category or NonspacingMark category
-         * @see org.apache.lucene.analysis.LetterTokenizer#isTokenChar(char)
+         * <see cref="LetterTokenizer.IsTokenChar(char)"/>
          */
         protected override bool IsTokenChar(char c)
         {

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.AR
 {
 
     /**
-     * A {@link TokenFilter} that applies {@link ArabicNormalizer} to normalize the orthography.
+     * A <see cref="TokenFilter"/> that applies <see cref="ArabicNormalizer"/> to normalize the orthography.
      * 
      */
 

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -28,16 +28,16 @@ namespace Lucene.Net.Analysis.AR
 {
     /**
      *  Normalizer for Arabic.
-     *  <p>
+     *  <p/>
      *  Normalization is done in-place for efficiency, operating on a termbuffer.
-     *  <p>
+     *  <p/>
      *  Normalization is defined as:
      *  <ul>
-     *  <li> Normalization of hamza with alef seat to a bare alef.
-     *  <li> Normalization of teh marbuta to heh
-     *  <li> Normalization of dotless yeh (alef maksura) to yeh.
-     *  <li> Removal of Arabic diacritics (the harakat)
-     *  <li> Removal of tatweel (stretching character).
+     *  <li> Normalization of hamza with alef seat to a bare alef.</li>
+     *  <li> Normalization of teh marbuta to heh</li>
+     *  <li> Normalization of dotless yeh (alef maksura) to yeh.</li>
+     *  <li> Removal of Arabic diacritics (the harakat)</li>
+     *  <li> Removal of tatweel (stretching character).</li>
      * </ul>
      *
      */
@@ -68,9 +68,9 @@ namespace Lucene.Net.Analysis.AR
         /**
          * Normalize an input buffer of Arabic text
          * 
-         * @param s input buffer
-         * @param len length of input buffer
-         * @return length of input buffer after normalization
+         * <param name="s">input buffer</param>
+         * <param name="len">length of input buffer</param>
+         * <returns>length of input buffer after normalization</returns>
          */
         public int Normalize(char[] s, int len)
         {
@@ -100,10 +100,10 @@ namespace Lucene.Net.Analysis.AR
         /**
          * Delete a character in-place
          * 
-         * @param s Input Buffer
-         * @param pos Position of character to delete
-         * @param len length of input buffer
-         * @return length of input buffer after deletion
+         * <param name="s">Input Buffer</param>
+         * <param name="pos">Position of character to delete</param>
+         * <param name="len">length of input buffer</param>
+         * <returns>length of input buffer after deletion</returns>
          */
         protected int Delete(char[] s, int pos, int len)
         {

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.AR
 
 
     /**
-     * A {@link TokenFilter} that applies {@link ArabicStemmer} to stem Arabic words..
+     * A <see cref="TokenFilter"/> that applies <see cref="ArabicStemmer"/> to stem Arabic words..
      * 
      */
 

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -30,13 +30,13 @@ namespace Lucene.Net.Analysis.AR
 
     /**
      *  Stemmer for Arabic.
-     *  <p>
+     *  <p/>
      *  Stemming  is done in-place for efficiency, operating on a termbuffer.
-     *  <p>
+     *  <p/>
      *  Stemming is defined as:
      *  <ul>
-     *  <li> Removal of attached definite article, conjunction, and prepositions.
-     *  <li> Stemming of common suffixes.
+     *  <li> Removal of attached definite article, conjunction, and prepositions.</li>
+     *  <li> Stemming of common suffixes.</li>
      * </ul>
      *
      */
@@ -81,9 +81,9 @@ namespace Lucene.Net.Analysis.AR
         /**
          * Stem an input buffer of Arabic text.
          * 
-         * @param s input buffer
-         * @param len length of input buffer
-         * @return length of input buffer after normalization
+         * <param name="s">input buffer</param>
+         * <param name="len">length of input buffer</param>
+         * <returns>length of input buffer after normalization</returns>
          */
         public int Stem(char[] s, int len)
         {
@@ -95,9 +95,9 @@ namespace Lucene.Net.Analysis.AR
 
         /**
          * Stem a prefix off an Arabic word.
-         * @param s input buffer
-         * @param len length of input buffer
-         * @return new length of input buffer after stemming.
+         * <param name="s">input buffer</param>
+         * <param name="len">length of input buffer</param>
+         * <returns>new length of input buffer after stemming.</returns>
          */
         public int StemPrefix(char[] s, int len)
         {
@@ -109,9 +109,9 @@ namespace Lucene.Net.Analysis.AR
 
         /**
          * Stem suffix(es) off an Arabic word.
-         * @param s input buffer
-         * @param len length of input buffer
-         * @return new length of input buffer after stemming
+         * <param name="s">input buffer</param>
+         * <param name="len">length of input buffer</param>
+         * <returns>new length of input buffer after stemming</returns>
          */
         public int StemSuffix(char[] s, int len)
         {
@@ -123,10 +123,10 @@ namespace Lucene.Net.Analysis.AR
 
         /**
          * Returns true if the prefix matches and can be stemmed
-         * @param s input buffer
-         * @param len length of input buffer
-         * @param prefix prefix to check
-         * @return true if the prefix matches and can be stemmed
+         * <param name="s">input buffer</param>
+         * <param name="len">length of input buffer</param>
+         * <param name="prefix">prefix to check</param>
+         * <returns>true if the prefix matches and can be stemmed</returns>
          */
         bool StartsWith(char[] s, int len, char[] prefix)
         {
@@ -150,10 +150,10 @@ namespace Lucene.Net.Analysis.AR
 
         /**
          * Returns true if the suffix matches and can be stemmed
-         * @param s input buffer
-         * @param len length of input buffer
-         * @param suffix suffix to check
-         * @return true if the suffix matches and can be stemmed
+         * <param name="s">input buffer</param>
+         * <param name="len">length of input buffer</param>
+         * <param name="suffix">suffix to check</param>
+         * <returns>true if the suffix matches and can be stemmed</returns>
          */
         bool EndsWith(char[] s, int len, char[] suffix)
         {
@@ -175,11 +175,11 @@ namespace Lucene.Net.Analysis.AR
         /**
          * Delete n characters in-place
          * 
-         * @param s Input Buffer
-         * @param pos Position of character to delete
-         * @param len Length of input buffer
-         * @param nChars number of characters to delete
-         * @return length of input buffer after deletion
+         * <param name="s">Input Buffer</param>
+         * <param name="pos">Position of character to delete</param>
+         * <param name="len">Length of input buffer</param>
+         * <param name="nChars">number of characters to delete</param>
+         * <returns>length of input buffer after deletion</returns>
          */
         protected int DeleteN(char[] s, int pos, int len, int nChars)
         {
@@ -191,10 +191,10 @@ namespace Lucene.Net.Analysis.AR
         /**
          * Delete a character in-place
          * 
-         * @param s Input Buffer
-         * @param pos Position of character to delete
-         * @param len length of input buffer
-         * @return length of input buffer after deletion
+         * <param name="s">Input Buffer</param>
+         * <param name="pos">Position of character to delete</param>
+         * <param name="len">length of input buffer</param>
+         * <returns>length of input buffer after deletion</returns>
          */
         protected int Delete(char[] s, int pos, int len)
         {

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -68,7 +68,7 @@ namespace Lucene.Net.Analysis.BR
         private Hashtable excltable = new Hashtable();
 
         /**
-         * Builds an analyzer with the default stop words ({@link #BRAZILIAN_STOP_WORDS}).
+         * Builds an analyzer with the default stop words (<see cref="BRAZILIAN_STOP_WORDS"/>).
          */
         public BrazilianAnalyzer()
         {
@@ -124,8 +124,8 @@ namespace Lucene.Net.Analysis.BR
         /**
          * Creates a TokenStream which tokenizes all the text in the provided Reader.
          *
-         * @return  A TokenStream build from a StandardTokenizer filtered with
-         * 			StandardFilter, StopFilter, GermanStemFilter and LowerCaseFilter.
+         * <returns>A TokenStream build from a StandardTokenizer filtered with
+         * 			StandardFilter, StopFilter, GermanStemFilter and LowerCaseFilter.</returns>
          */
         public override TokenStream TokenStream(string fieldName, TextReader reader)
         {

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -48,7 +48,7 @@ namespace Lucene.Net.Analysis.BR
         }
 
         /**
-         * @return Returns the next token in the stream, or null at EOS.
+         * <returns>Returns the next token in the stream, or null at EOS.</returns>
          */
         public override Token Next(Token reusableToken)
         {

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -41,8 +41,8 @@ namespace Lucene.Net.Analysis.BR
         /**
          * Stemms the given term to an unique <tt>discriminator</tt>.
          *
-         * @param term  The term that should be stemmed.
-         * @return      Discriminator for <tt>term</tt>
+         * <param name="term"> The term that should be stemmed.</param>
+         * <returns>     Discriminator for <tt>term</tt></returns>
          */
         public string Stem(string term)
         {
@@ -88,7 +88,7 @@ namespace Lucene.Net.Analysis.BR
         /**
          * Checks a term if it can be processed correctly.
          *
-         * @return  true if, and only if, the given term consists in letters.
+         * <returns> true if, and only if, the given term consists in letters.</returns>
          */
         private bool isStemmable(string term)
         {
@@ -106,7 +106,7 @@ namespace Lucene.Net.Analysis.BR
         /**
          * Checks a term if it can be processed indexed.
          *
-         * @return  true if it can be indexed
+         * <returns> true if it can be indexed</returns>
          */
         private bool isIndexable(string term)
         {
@@ -116,7 +116,7 @@ namespace Lucene.Net.Analysis.BR
         /**
          * See if string is 'a','e','i','o','u'
        *
-       * @return true if is vowel
+       * <returns>true if is vowel</returns>
          */
         private bool isVowel(char value)
         {
@@ -134,7 +134,7 @@ namespace Lucene.Net.Analysis.BR
        *      or is the null region at the end of the word if there is
        *      no such non-vowel.
        *
-       * @return null or a string representing R1
+       * <returns>null or a string representing R1</returns>
          */
         private string getR1(string value)
         {
@@ -194,7 +194,7 @@ namespace Lucene.Net.Analysis.BR
        *      BUT RV is the end of the word if this positions cannot be
        *      found.
        *
-       * @return null or a string representing RV
+       * <returns>null or a string representing RV</returns>
          */
         private string getRV(string value)
         {
@@ -266,7 +266,7 @@ namespace Lucene.Net.Analysis.BR
        * 3) ã -> a ; õ -> o
        * 4) ç -> c
        *
-       * @return null or a string transformed
+       * <returns>null or a string transformed</returns>
          */
         private string changeTerm(string value)
         {
@@ -326,7 +326,7 @@ namespace Lucene.Net.Analysis.BR
         /**
        * Check if a string ends with a suffix
        *
-       * @return true if the string ends with the specified suffix
+       * <returns>true if the string ends with the specified suffix</returns>
          */
         private bool suffix(string value, string suffix)
         {
@@ -348,7 +348,7 @@ namespace Lucene.Net.Analysis.BR
         /**
        * Replace a string suffix by another
        *
-       * @return the replaced string
+       * <returns>the replaced string</returns>
          */
         private string replaceSuffix(string value, string toReplace, string changeTo)
         {
@@ -377,7 +377,7 @@ namespace Lucene.Net.Analysis.BR
         /**
        * Remove a string suffix
        *
-       * @return the string without the suffix
+       * <returns>the string without the suffix</returns>
          */
         private string removeSuffix(string value, string toRemove)
         {
@@ -395,7 +395,7 @@ namespace Lucene.Net.Analysis.BR
         /**
        * See if a suffix is preceded by a string
        *
-       * @return true if the suffix is preceded
+       * <returns>true if the suffix is preceded</returns>
          */
         private bool suffixPreceded(string value, string _suffix, string preceded)
         {
@@ -460,7 +460,7 @@ namespace Lucene.Net.Analysis.BR
        * Search for the longest among the following suffixes, and perform
        * the following actions:
        *
-       * @return false if no ending was removed
+       * <returns>false if no ending was removed</returns>
          */
         private bool step1()
         {
@@ -671,7 +671,7 @@ namespace Lucene.Net.Analysis.BR
        * Search for the longest among the following suffixes in RV,
        * and if found, delete.
        *
-       * @return false if no ending was removed
+       * <returns>false if no ending was removed</returns>
         */
         private bool step2()
         {
@@ -1248,7 +1248,7 @@ namespace Lucene.Net.Analysis.BR
         /**
          * For log and debug purpose
          *
-         * @return  TERM, CT, RV, R1 and R2
+         * <returns> TERM, CT, RV, R1 and R2</returns>
          */
         public string Log()
         {

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs Sun Nov  6 05:24:26 2011
@@ -84,10 +84,10 @@ namespace Lucene.Net.Analysis.CJK
 	/// <p>
 	/// CJKTokenizer was modified from StopTokenizer which does a decent job for
 	/// most European languages. and it perferm other token method for double-byte
-	/// Characters: the token will return at each two charactors with overlap match.<br>
+	/// Characters: the token will return at each two charactors with overlap match.<br/>
 	/// Example: "java C1C2C3C4" will be segment to: "java" "C1C2" "C2C3" "C3C4" it
-	/// also need filter filter zero length token ""<br>
-	/// for Digit: digit, '+', '#' will token as letter<br>
+	/// also need filter filter zero length token ""<br/>
+	/// for Digit: digit, '+', '#' will token as letter<br/>
 	/// for more info on Asia language(Chinese Japanese Korean) text segmentation:
 	/// please search  <a
 	/// href="http://www.google.com/search?q=word+chinese+segment">google</a>
@@ -128,13 +128,13 @@ namespace Lucene.Net.Analysis.CJK
 		private int dataLen = 0;
 
 		/// <summary>
-		/// character buffer, store the characters which are used to compose <br>
+		/// character buffer, store the characters which are used to compose <br/>
 		/// the returned Token
 		/// </summary>
 		private char[] buffer = new char[MAX_WORD_LEN];
 
 		/// <summary>
-		/// I/O buffer, used to store the content of the input(one of the <br>
+		/// I/O buffer, used to store the content of the input(one of the <br/>
 		/// members of Tokenizer)
 		/// </summary>
 		private char[] ioBuffer = new char[IO_BUFFER_SIZE];
@@ -237,14 +237,14 @@ namespace Lucene.Net.Analysis.CJK
 					{
 						if (length == 0) 
 						{
-							// "javaC1C2C3C4linux" <br>
+							// "javaC1C2C3C4linux" <br/>
 							//      ^--: the current character begin to token the ASCII
 							// letter
 							start = offset - 1;
 						} 
 						else if (tokenType == "double") 
 						{
-							// "javaC1C2C3C4linux" <br>
+							// "javaC1C2C3C4linux" <br/>
 							//              ^--: the previous non-ASCII
 							// : the current character
 							offset--;

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs Sun Nov  6 05:24:26 2011
@@ -195,6 +195,7 @@ namespace Lucene.Net.Analysis.Cz
 
 		/// <summary>
 		/// Creates a TokenStream which tokenizes all the text in the provided Reader.
+		/// </summary>
 		/// <returns>
 		/// A TokenStream build from a StandardTokenizer filtered with
 		/// StandardFilter, StopFilter, GermanStemFilter and LowerCaseFilter

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/De/GermanStemmer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/De/GermanStemmer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/De/GermanStemmer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/De/GermanStemmer.cs Sun Nov  6 05:24:26 2011
@@ -175,11 +175,11 @@ namespace Lucene.Net.Analysis.De
 		/// Do some substitutions for the term to reduce overstemming:
 		///
 		/// - Substitute Umlauts with their corresponding vowel: äöü -> aou,
-		///   "ß" is substituted by "ss"
+        ///   "&#223;" is substituted by "ss"
 		/// - Substitute a second char of a pair of equal characters with
-		/// an asterisk: ?? -> ?*
+		/// an asterisk: ?? -&gt; ?*
 		/// - Substitute some common character combinations with a token:
-		///   sch/ch/ei/ie/ig/st -> $/В§/%/&/#/!
+        ///   sch/ch/ei/ie/ig/st -&gt; $/В&#167;/%/&amp;/#/!
 		/// </summary>
 		private void Substitute( StringBuilder buffer )
 		{

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs Sun Nov  6 05:24:26 2011
@@ -194,7 +194,7 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Sets the search region Strings<br>
+		/// Sets the search region Strings<br/>
 		/// it needs to be done each time the buffer was modified
 		/// </summary>
 		private void SetStrings() 
@@ -214,7 +214,7 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// First step of the Porter Algorithmn<br>
+		/// First step of the Porter Algorithmn<br/>
 		/// refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 		/// </summary>
 		private void Step1( ) 
@@ -269,9 +269,9 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Second step (A) of the Porter Algorithmn<br>
+		/// Second step (A) of the Porter Algorithmn<br/>
 		/// Will be performed if nothing changed from the first step
-		/// or changed were done in the amment, emment, ments or ment suffixes<br>
+		/// or changed were done in the amment, emment, ments or ment suffixes<br/>
 		/// refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 		/// </summary>
 		/// <returns>
@@ -288,8 +288,8 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Second step (B) of the Porter Algorithmn<br>
-		/// Will be performed if step 2 A was performed unsuccessfully<br>
+		/// Second step (B) of the Porter Algorithmn<br/>
+		/// Will be performed if step 2 A was performed unsuccessfully<br/>
 		/// refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 		/// </summary>
 		private void Step2b() 
@@ -308,7 +308,7 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Third step of the Porter Algorithmn<br>
+		/// Third step of the Porter Algorithmn<br/>
 		/// refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 		/// </summary>
 		private void Step3() 
@@ -330,7 +330,7 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Fourth step of the Porter Algorithmn<br>
+		/// Fourth step of the Porter Algorithmn<br/>
 		/// refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 		/// </summary>
 		private void Step4() 
@@ -358,7 +358,7 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Fifth step of the Porter Algorithmn<br>
+		/// Fifth step of the Porter Algorithmn<br/>
 		/// refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 		/// </summary>
 		private void Step5() 
@@ -374,7 +374,7 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Sixth (and last!) step of the Porter Algorithmn<br>
+		/// Sixth (and last!) step of the Porter Algorithmn<br/>
 		/// refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
 		/// </summary>
 		private void Step6() 
@@ -509,8 +509,8 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Delete a suffix searched in zone "source" if preceded by prefix<br>
-		/// or replace it with the replace string if preceded by the prefix in the zone "from"<br>
+		/// Delete a suffix searched in zone "source" if preceded by prefix<br/>
+		/// or replace it with the replace string if preceded by the prefix in the zone "from"<br/>
 		/// or delete the suffix if specified
 		/// </summary>
 		/// <param name="source">the primary source zone for search</param>
@@ -633,9 +633,9 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Retrieve the "R zone" (1 or 2 depending on the buffer) and return the corresponding string<br>
+		/// Retrieve the "R zone" (1 or 2 depending on the buffer) and return the corresponding string<br/>
 		/// "R is the region after the first non-vowel following a vowel
-		/// or is the null region at the end of the word if there is no such non-vowel"<br>
+		/// or is the null region at the end of the word if there is no such non-vowel"<br/>
 		/// </summary>
 		/// <param name="buffer">the in buffer</param>
 		/// <returns>the resulting string</returns>
@@ -672,10 +672,10 @@ namespace Lucene.Net.Analysis.Fr
 		}
 
 		/// <summary>
-		/// Retrieve the "RV zone" from a buffer an return the corresponding string<br>
+		/// Retrieve the "RV zone" from a buffer an return the corresponding string<br/>
 		/// "If the word begins with two vowels, RV is the region after the third letter,
 		/// otherwise the region after the first vowel not at the beginning of the word,
-		/// or the end of the word if these positions cannot be found."<br>
+		/// or the end of the word if these positions cannot be found."<br/>
 		/// </summary>
 		/// <param name="buffer">the in buffer</param>
 		/// <returns>the resulting string</returns>
@@ -711,9 +711,9 @@ namespace Lucene.Net.Analysis.Fr
 
 
 		/// <summary>
-		/// Turns u and i preceded AND followed by a vowel to UpperCase<br>
-		/// Turns y preceded OR followed by a vowel to UpperCase<br>
-		/// Turns u preceded by q to UpperCase<br>
+		/// Turns u and i preceded AND followed by a vowel to UpperCase<br/>
+		/// Turns y preceded OR followed by a vowel to UpperCase<br/>
+		/// Turns u preceded by q to UpperCase<br/>
 		/// </summary>
 		/// <param name="buffer">the buffer to treat</param>
 		/// <returns>the treated buffer</returns>

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -78,7 +78,7 @@ namespace Lucene.Net.Analyzers.Miscellan
         /// <summary>
         /// @deprecated Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. 
         /// </summary>
-        [Obsolete("The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling {@link #Next(Token)} or using the new IncrementToken() method with the new AttributeSource API.")]
+        [Obsolete("The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling Next(Token) or using the new IncrementToken() method with the new AttributeSource API.")]
         public override sealed Token Next()
         {
             return base.Next();

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -129,7 +129,7 @@ namespace Lucene.Net.Analyzers.Miscellan
         /// @deprecated Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer.
         /// </summary>
         /// <returns></returns>
-        [Obsolete("The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling {@link #Next(Token)} or using the new IncrementToken() method with the new AttributeSource API.")]
+        [Obsolete("The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling Next(Token) or using the new IncrementToken() method with the new AttributeSource API.")]
         public override sealed Token Next()
         {
             return base.Next();

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -76,7 +76,7 @@ namespace Lucene.Net.Analyzers.Miscellan
         /// </summary>
         /// <returns></returns>
         [Obsolete(
-            "The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling {@link #Next(Token)} or using the new IncrementToken() method with the new AttributeSource API."
+            "The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling Next(Token) or using the new IncrementToken() method with the new AttributeSource API."
             )]
         public override sealed Token Next()
         {

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.NGram
     /**
      * Tokenizes the given token into n-grams of given size(s).
      * <p>
-     * This {@link TokenFilter} create n-grams from the beginning edge or ending edge of a input token.
+     * This <see cref="TokenFilter"/> creates n-grams from the beginning edge or ending edge of an input token.
      * </p>
      */
     public class EdgeNGramTokenFilter : TokenFilter
@@ -90,10 +90,10 @@ namespace Lucene.Net.Analysis.NGram
         /**
          * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
          *
-         * @param input {@link TokenStream} holding the input to be tokenized
-         * @param side the {@link Side} from which to chop off an n-gram
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="input"><see cref="TokenStream"/> holding the input to be tokenized</param>
+         * <param name="side">the <see cref="Side"/> from which to chop off an n-gram</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public EdgeNGramTokenFilter(TokenStream input, Side side, int minGram, int maxGram)
             : base(input)
@@ -125,10 +125,10 @@ namespace Lucene.Net.Analysis.NGram
         /**
          * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
          *
-         * @param input {@link TokenStream} holding the input to be tokenized
-         * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="input"><see cref="TokenStream"/> holding the input to be tokenized</param>
+         * <param name="sideLabel">the name of the <see cref="Side"/> from which to chop off an n-gram</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public EdgeNGramTokenFilter(TokenStream input, string sideLabel, int minGram, int maxGram)
             : this(input, Side.getSide(sideLabel), minGram, maxGram)

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.NGram
     /**
      * Tokenizes the input from an edge into n-grams of given size(s).
      * <p>
-     * This {@link Tokenizer} create n-grams from the beginning edge or ending edge of a input token.
+     * This <see cref="Tokenizer"/> creates n-grams from the beginning edge or ending edge of an input token.
      * MaxGram can't be larger than 1024 because of limitation.
      * </p>
      */
@@ -86,10 +86,10 @@ namespace Lucene.Net.Analysis.NGram
         /**
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          *
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param side the {@link Side} from which to chop off an n-gram
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="side">the <see cref="Side"/> from which to chop off an n-gram</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public EdgeNGramTokenizer(TextReader input, Side side, int minGram, int maxGram)
             : base(input)
@@ -100,11 +100,11 @@ namespace Lucene.Net.Analysis.NGram
         /**
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          *
-         * @param source {@link AttributeSource} to use
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param side the {@link Side} from which to chop off an n-gram
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="source"><see cref="AttributeSource"/> to use</param>
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="side">the <see cref="Side"/> from which to chop off an n-gram</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public EdgeNGramTokenizer(AttributeSource source, TextReader input, Side side, int minGram, int maxGram)
             : base(source, input)
@@ -116,11 +116,11 @@ namespace Lucene.Net.Analysis.NGram
         /**
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          * 
-         * @param factory {@link org.apache.lucene.util.AttributeSource.AttributeFactory} to use
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param side the {@link Side} from which to chop off an n-gram
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="factory"><see cref="AttributeSource.AttributeFactory"/> to use</param>
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="side">the <see cref="Side"/> from which to chop off an n-gram</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public EdgeNGramTokenizer(AttributeFactory factory, TextReader input, Side side, int minGram, int maxGram)
             : base(factory, input)
@@ -132,10 +132,10 @@ namespace Lucene.Net.Analysis.NGram
         /**
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          *
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="sideLabel">the name of the <see cref="Side"/> from which to chop off an n-gram</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public EdgeNGramTokenizer(TextReader input, string sideLabel, int minGram, int maxGram)
             : this(input, Side.getSide(sideLabel), minGram, maxGram)
@@ -146,11 +146,11 @@ namespace Lucene.Net.Analysis.NGram
         /**
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          *
-         * @param source {@link AttributeSource} to use
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="source"><see cref="AttributeSource"/> to use</param>
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="sideLabel">the name of the <see cref="Side"/> from which to chop off an n-gram</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public EdgeNGramTokenizer(AttributeSource source, TextReader input, string sideLabel, int minGram, int maxGram)
             : this(source, input, Side.getSide(sideLabel), minGram, maxGram)
@@ -161,11 +161,11 @@ namespace Lucene.Net.Analysis.NGram
         /**
          * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
          * 
-         * @param factory {@link org.apache.lucene.util.AttributeSource.AttributeFactory} to use
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="factory"><see cref="AttributeSource.AttributeFactory"/> to use</param>
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="sideLabel">the name of the <see cref="Side"/> from which to chop off an n-gram</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public EdgeNGramTokenizer(AttributeFactory factory, TextReader input, string sideLabel, int minGram, int maxGram) :
             this(factory, input, Side.getSide(sideLabel), minGram, maxGram)

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -46,9 +46,9 @@ namespace Lucene.Net.Analysis.NGram
 
         /**
          * Creates NGramTokenFilter with given min and max n-grams.
-         * @param input {@link TokenStream} holding the input to be tokenized
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="input"><see cref="TokenStream"/> holding the input to be tokenized</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public NGramTokenFilter(TokenStream input, int minGram, int maxGram)
             : base(input)
@@ -71,7 +71,7 @@ namespace Lucene.Net.Analysis.NGram
 
         /**
          * Creates NGramTokenFilter with default min and max n-grams.
-         * @param input {@link TokenStream} holding the input to be tokenized
+         * <param name="input"><see cref="TokenStream"/> holding the input to be tokenized</param>
          */
         public NGramTokenFilter(TokenStream input)
             : this(input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE)

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -45,9 +45,9 @@ namespace Lucene.Net.Analysis.NGram
 
         /**
          * Creates NGramTokenizer with given min and max n-grams.
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public NGramTokenizer(TextReader input, int minGram, int maxGram)
             : base(input)
@@ -57,10 +57,10 @@ namespace Lucene.Net.Analysis.NGram
 
         /**
          * Creates NGramTokenizer with given min and max n-grams.
-         * @param source {@link AttributeSource} to use
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="source"><see cref="AttributeSource"/> to use</param>
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public NGramTokenizer(AttributeSource source, TextReader input, int minGram, int maxGram)
             : base(source, input)
@@ -70,10 +70,10 @@ namespace Lucene.Net.Analysis.NGram
 
         /**
          * Creates NGramTokenizer with given min and max n-grams.
-         * @param factory {@link org.apache.lucene.util.AttributeSource.AttributeFactory} to use
-         * @param input {@link Reader} holding the input to be tokenized
-         * @param minGram the smallest n-gram to generate
-         * @param maxGram the largest n-gram to generate
+         * <param name="factory"><see cref="AttributeSource.AttributeFactory"/> to use</param>
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
+         * <param name="minGram">the smallest n-gram to generate</param>
+         * <param name="maxGram">the largest n-gram to generate</param>
          */
         public NGramTokenizer(AttributeFactory factory, TextReader input, int minGram, int maxGram)
             : base(factory, input)
@@ -83,7 +83,7 @@ namespace Lucene.Net.Analysis.NGram
 
         /**
          * Creates NGramTokenizer with default min and max n-grams.
-         * @param input {@link Reader} holding the input to be tokenized
+         * <param name="input"><see cref="TextReader"/> holding the input to be tokenized</param>
          */
         public NGramTokenizer(TextReader input)
             : this(input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE)

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs Sun Nov  6 05:24:26 2011
@@ -87,10 +87,7 @@ namespace Lucene.Net.Analysis.Nl
 	/// not be stemmed, but indexed) and an external list of word-stem pairs that overrule
 	/// the algorithm (dictionary stemming).
 	/// A default set of stopwords is used unless an alternative list is specified, the
-	/// exclusion list is empty by default.
-	/// As start for the Analyzer the German Analyzer was used. The stemming algorithm
-	/// implemented can be found at <c cref=""></c>
-	/// 
+	/// exclusion list is empty by default. 
 	/// <version>$Id: DutchAnalyzer.java,v 1.1 2004/03/09 14:55:08 otis Exp $</version>
 	/// </summary>
 	/// <author>Edwin de Jonge</author>

Modified: incubator/lucene.net/trunk/src/contrib/Analyzers/Payloads/PayloadHelper.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Analyzers/Payloads/PayloadHelper.cs?rev=1198132&r1=1198131&r2=1198132&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Analyzers/Payloads/PayloadHelper.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Analyzers/Payloads/PayloadHelper.cs Sun Nov  6 05:24:26 2011
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.