You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by cc...@apache.org on 2012/10/05 23:22:59 UTC
svn commit: r1394820 [1/6] - in /lucene.net/trunk: src/contrib/Analyzers/
src/contrib/Analyzers/AR/ src/contrib/Analyzers/BR/
src/contrib/Analyzers/CJK/ src/contrib/Analyzers/Compound/
src/contrib/Analyzers/Compound/Hyphenation/ src/contrib/Analyzers/C...
Author: ccurrens
Date: Fri Oct 5 21:22:51 2012
New Revision: 1394820
URL: http://svn.apache.org/viewvc?rev=1394820&view=rev
Log:
Fixed a lot of XML comment errors and inconsistencies
Modified:
lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs
lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs
lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs
lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs
lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs
lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs
lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs
lucene.net/trunk/src/contrib/Analyzers/CJK/CJKAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/DictionaryCompoundWordTokenFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs
lucene.net/trunk/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/El/GreekAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/El/GreekLowerCaseFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Fa/PersianAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizationFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizer.cs
lucene.net/trunk/src/contrib/Analyzers/Fr/ElisionFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Fr/FrenchStemmer.cs
lucene.net/trunk/src/contrib/Analyzers/Miscellaneous/PatternAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs
lucene.net/trunk/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs
lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenFilter.cs
lucene.net/trunk/src/contrib/Analyzers/NGram/NGramTokenizer.cs
lucene.net/trunk/src/contrib/Analyzers/Nl/DutchAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Nl/DutchStemmer.cs
lucene.net/trunk/src/contrib/Analyzers/Payloads/TokenOffsetPayloadTokenFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Position/PositionFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/Reverse/ReverseStringFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Ru/RussianAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/Ru/RussianLetterTokenizer.cs
lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Ru/RussianStemmer.cs
lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs
lucene.net/trunk/src/contrib/Analyzers/Shingle/ShingleFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Sinks/DateRecognizerSinkFilter.cs
lucene.net/trunk/src/contrib/Analyzers/Th/ThaiAnalyzer.cs
lucene.net/trunk/src/contrib/Analyzers/Th/ThaiWordFilter.cs
lucene.net/trunk/src/contrib/Analyzers/WordlistLoader.cs
lucene.net/trunk/src/contrib/Core/Index/FieldEnumerator.cs
lucene.net/trunk/src/contrib/FastVectorHighlighter/FieldQuery.cs
lucene.net/trunk/src/contrib/FastVectorHighlighter/ScoreOrderFragmentsBuilder.cs
lucene.net/trunk/src/contrib/FastVectorHighlighter/package.html
lucene.net/trunk/src/contrib/Highlighter/Highlighter.cs
lucene.net/trunk/src/contrib/Highlighter/QueryScorer.cs
lucene.net/trunk/src/contrib/Highlighter/QueryTermScorer.cs
lucene.net/trunk/src/contrib/Highlighter/SimpleFragmenter.cs
lucene.net/trunk/src/contrib/Highlighter/SimpleHTMLEncoder.cs
lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTerm.cs
lucene.net/trunk/src/contrib/Highlighter/WeightedSpanTermExtractor.cs
lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs
lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs
lucene.net/trunk/src/contrib/Queries/DuplicateFilter.cs
lucene.net/trunk/src/contrib/Queries/FilterClause.cs
lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs
lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs
lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThisQuery.cs
lucene.net/trunk/src/contrib/Snowball/LICENSE.txt
lucene.net/trunk/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballAnalyzer.cs
lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs
lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs
lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs
lucene.net/trunk/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs
lucene.net/trunk/src/contrib/Spatial/BBox/AreaSimilarity.cs
lucene.net/trunk/src/contrib/Spatial/BBox/BBoxStrategy.cs
lucene.net/trunk/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs
lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs
lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/Node.cs
lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs
lucene.net/trunk/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs
lucene.net/trunk/src/contrib/Spatial/SpatialStrategy.cs
lucene.net/trunk/src/contrib/Spatial/Util/CompatibilityExtensions.cs
lucene.net/trunk/src/contrib/Spatial/Util/FixedBitSet.cs
lucene.net/trunk/src/contrib/Spatial/Util/FunctionQuery.cs
lucene.net/trunk/src/contrib/Spatial/Util/ValueSourceFilter.cs
lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs
lucene.net/trunk/src/contrib/SpellChecker/Spell/TRStringDistance.cs
lucene.net/trunk/src/core/Analysis/Analyzer.cs
lucene.net/trunk/src/core/Analysis/BaseCharFilter.cs
lucene.net/trunk/src/core/Analysis/CharArraySet.cs
lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs
lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs
lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.cs
lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizerImpl.jflex
lucene.net/trunk/src/core/Analysis/StopFilter.cs
lucene.net/trunk/src/core/Analysis/Token.cs
lucene.net/trunk/src/core/Analysis/TokenStream.cs
lucene.net/trunk/src/core/Document/AbstractField.cs
lucene.net/trunk/src/core/Document/Field.cs
lucene.net/trunk/src/core/Document/Fieldable.cs
lucene.net/trunk/src/core/Index/CheckIndex.cs
lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs
lucene.net/trunk/src/core/Index/DocumentsWriter.cs
lucene.net/trunk/src/core/Index/IndexCommit.cs
lucene.net/trunk/src/core/Index/IndexDeletionPolicy.cs
lucene.net/trunk/src/core/Index/IndexReader.cs
lucene.net/trunk/src/core/Index/IndexWriter.cs
lucene.net/trunk/src/core/Index/LogByteSizeMergePolicy.cs
lucene.net/trunk/src/core/Index/LogMergePolicy.cs
lucene.net/trunk/src/core/Index/Payload.cs
lucene.net/trunk/src/core/QueryParser/QueryParser.JJ
lucene.net/trunk/src/core/QueryParser/QueryParser.cs
lucene.net/trunk/src/core/Search/BooleanQuery.cs
lucene.net/trunk/src/core/Search/CachingWrapperFilter.cs
lucene.net/trunk/src/core/Search/Collector.cs
lucene.net/trunk/src/core/Search/DefaultSimilarity.cs
lucene.net/trunk/src/core/Search/DisjunctionMaxScorer.cs
lucene.net/trunk/src/core/Search/DisjunctionSumScorer.cs
lucene.net/trunk/src/core/Search/Explanation.cs
lucene.net/trunk/src/core/Search/FieldCacheImpl.cs
lucene.net/trunk/src/core/Search/FieldComparator.cs
lucene.net/trunk/src/core/Search/FieldValueHitQueue.cs
lucene.net/trunk/src/core/Search/Function/ByteFieldSource.cs
lucene.net/trunk/src/core/Search/Function/FloatFieldSource.cs
lucene.net/trunk/src/core/Search/Function/IntFieldSource.cs
lucene.net/trunk/src/core/Search/Function/ShortFieldSource.cs
lucene.net/trunk/src/core/Search/HitQueue.cs
lucene.net/trunk/src/core/Search/IndexSearcher.cs
lucene.net/trunk/src/core/Search/MultiPhraseQuery.cs
lucene.net/trunk/src/core/Search/MultiTermQuery.cs
lucene.net/trunk/src/core/Search/MultiTermQueryWrapperFilter.cs
lucene.net/trunk/src/core/Search/PhraseQuery.cs
lucene.net/trunk/src/core/Search/ReqOptSumScorer.cs
lucene.net/trunk/src/core/Search/Searchable.cs
lucene.net/trunk/src/core/Search/Searcher.cs
lucene.net/trunk/src/core/Search/Similarity.cs
lucene.net/trunk/src/core/Search/SingleTermEnum.cs
lucene.net/trunk/src/core/Search/Spans/FieldMaskingSpanQuery.cs
lucene.net/trunk/src/core/Search/TimeLimitingCollector.cs
lucene.net/trunk/src/core/Search/TopDocsCollector.cs
lucene.net/trunk/src/core/Search/TopFieldCollector.cs
lucene.net/trunk/src/core/Search/Weight.cs
lucene.net/trunk/src/core/Store/BufferedIndexOutput.cs
lucene.net/trunk/src/core/Store/CheckSumIndexOutput.cs
lucene.net/trunk/src/core/Store/FSDirectory.cs
lucene.net/trunk/src/core/Store/MMapDirectory.cs
lucene.net/trunk/src/core/Util/AttributeSource.cs
lucene.net/trunk/src/core/Util/BitUtil.cs
lucene.net/trunk/src/core/Util/Cache/SimpleMapCache.cs
lucene.net/trunk/src/core/Util/OpenBitSet.cs
lucene.net/trunk/src/demo/Demo.Common/FileDocument.cs
lucene.net/trunk/src/demo/Demo.Common/HTML/HTMLParser.jj
lucene.net/trunk/src/demo/Demo.Common/HTML/ParseException.cs
lucene.net/trunk/src/demo/Demo.Common/HTML/SimpleCharStream.cs
lucene.net/trunk/test/contrib/Analyzers/AR/TestArabicAnalyzer.cs
lucene.net/trunk/test/contrib/Analyzers/AR/TestArabicNormalizationFilter.cs
lucene.net/trunk/test/contrib/Analyzers/AR/TestArabicStemFilter.cs
lucene.net/trunk/test/contrib/Analyzers/Br/TestBrazilianStemmer.cs
lucene.net/trunk/test/contrib/Analyzers/Cjk/TestCJKTokenizer.cs
lucene.net/trunk/test/contrib/Analyzers/Cz/TestCzechAnalyzer.cs
lucene.net/trunk/test/contrib/Analyzers/De/TestGermanStemFilter.cs
lucene.net/trunk/test/contrib/Analyzers/El/GreekAnalyzerTest.cs
lucene.net/trunk/test/contrib/Analyzers/Fa/TestPersianAnalyzer.cs
lucene.net/trunk/test/contrib/Analyzers/Fa/TestPersianNormalizationFilter.cs
lucene.net/trunk/test/contrib/Analyzers/Fr/TestElision.cs
lucene.net/trunk/test/contrib/Analyzers/Fr/TestFrenchAnalyzer.cs
lucene.net/trunk/test/contrib/Analyzers/Miscellaneous/PatternAnalyzerTest.cs
lucene.net/trunk/test/contrib/Analyzers/Miscellaneous/TestPrefixAndSuffixAwareTokenFilter.cs
lucene.net/trunk/test/contrib/Analyzers/Miscellaneous/TestPrefixAwareTokenFilter.cs
lucene.net/trunk/test/contrib/Analyzers/NGram/TestEdgeNGramTokenFilter.cs
lucene.net/trunk/test/contrib/Analyzers/NGram/TestEdgeNGramTokenizer.cs
lucene.net/trunk/test/contrib/Analyzers/NGram/TestNGramTokenFilter.cs
lucene.net/trunk/test/contrib/Analyzers/NGram/TestNGramTokenizer.cs
lucene.net/trunk/test/contrib/Analyzers/Nl/TestDutchStemmer.cs
lucene.net/trunk/test/contrib/Analyzers/Position/PositionFilterTest.cs
lucene.net/trunk/test/contrib/Analyzers/Query/QueryAutoStopWordAnalyzerTest.cs
lucene.net/trunk/test/contrib/Analyzers/Ru/TestRussianAnalyzer.cs
lucene.net/trunk/test/contrib/Analyzers/Ru/TestRussianStem.cs
lucene.net/trunk/test/contrib/Analyzers/Shingle/ShingleAnalyzerWrapperTest.cs
lucene.net/trunk/test/contrib/Analyzers/Shingle/ShingleFilterTest.cs
lucene.net/trunk/test/contrib/Analyzers/Shingle/TestShingleMatrixFilter.cs
lucene.net/trunk/test/contrib/Core/Index/FieldEnumeratorTest.cs
lucene.net/trunk/test/contrib/Core/Index/SegmentsGenCommitTest.cs
lucene.net/trunk/test/contrib/Core/Index/TermVectorEnumeratorTest.cs
lucene.net/trunk/test/contrib/Core/Util/Cache/SegmentCacheTest.cs
lucene.net/trunk/test/contrib/FastVectorHighlighter/AbstractTestCase.cs
lucene.net/trunk/test/contrib/FastVectorHighlighter/FieldPhraseListTest.cs
lucene.net/trunk/test/contrib/FastVectorHighlighter/FieldQueryTest.cs
lucene.net/trunk/test/contrib/FastVectorHighlighter/FieldTermStackTest.cs
lucene.net/trunk/test/contrib/FastVectorHighlighter/IndexTimeSynonymTest.cs
lucene.net/trunk/test/contrib/FastVectorHighlighter/ScoreOrderFragmentsBuilderTest.cs
lucene.net/trunk/test/contrib/FastVectorHighlighter/SimpleFragListBuilderTest.cs
lucene.net/trunk/test/contrib/FastVectorHighlighter/SimpleFragmentsBuilderTest.cs
lucene.net/trunk/test/contrib/Highlighter/HighlighterTest.cs
lucene.net/trunk/test/contrib/Memory/MemoryIndexTest.cs
lucene.net/trunk/test/contrib/Queries/BooleanFilterTest.cs
lucene.net/trunk/test/contrib/Queries/BoostingQueryTest.cs
lucene.net/trunk/test/contrib/Queries/DuplicateFilterTest.cs
lucene.net/trunk/test/contrib/Queries/FuzzyLikeThisQueryTest.cs
lucene.net/trunk/test/contrib/Queries/Similar/TestMoreLikeThis.cs
lucene.net/trunk/test/contrib/Queries/TermsFilterTest.cs
lucene.net/trunk/test/contrib/Snowball/Analysis/Snowball/TestSnowball.cs
lucene.net/trunk/test/contrib/Spatial/CheckHits.cs
lucene.net/trunk/test/contrib/Spatial/PortedSolr3Test.cs
lucene.net/trunk/test/contrib/Spatial/Prefix/TestRecursivePrefixTreeStrategy.cs
lucene.net/trunk/test/contrib/Spatial/StrategyTestCase.cs
lucene.net/trunk/test/core/Document/TestBinaryDocument.cs
lucene.net/trunk/test/core/Index/TestIsCurrent.cs
lucene.net/trunk/test/core/Index/TestStressIndexing2.cs
lucene.net/trunk/test/core/Search/Spans/TestSpansAdvanced.cs
lucene.net/trunk/test/core/Search/TestBooleanOr.cs
lucene.net/trunk/test/core/Search/TestCachingSpanFilter.cs
lucene.net/trunk/test/core/Search/TestMultiThreadTermVectors.cs
lucene.net/trunk/test/core/Search/TestThreadSafe.cs
lucene.net/trunk/test/core/Search/TestWildcard.cs
lucene.net/trunk/test/core/Store/MockRAMDirectory.cs
lucene.net/trunk/test/core/Util/LuceneTestCase.cs
lucene.net/trunk/test/core/Util/TestBitVector.cs
lucene.net/trunk/test/core/Util/TestNumericUtils.cs
Modified: lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/AR/ArabicAnalyzer.cs Fri Oct 5 21:22:51 2012
@@ -26,7 +26,7 @@ using Lucene.Net.Support.Compatibility;
namespace Lucene.Net.Analysis.AR
{
- /**
+ /*
* <see cref="Analyzer"/> for Arabic.
* <p/>
* This analyzer implements light-stemming as specified by:
@@ -46,7 +46,7 @@ namespace Lucene.Net.Analysis.AR
public class ArabicAnalyzer : Analyzer
{
- /**
+ /*
* File containing default Arabic stopwords.
*
* Default stopword list is from http://members.unine.ch/jacques.savoy/clef/index.html
@@ -54,11 +54,11 @@ namespace Lucene.Net.Analysis.AR
*/
public static string DEFAULT_STOPWORD_FILE = "ArabicStopWords.txt";
- /**
+ /*
* Contains the stopwords used with the StopFilter.
*/
private readonly ISet<string> stoptable;
- /**<summary>
+ /*<summary>
* The comment character in the stopwords file. All lines prefixed with this will be ignored
* </summary>
*/
@@ -103,7 +103,7 @@ namespace Lucene.Net.Analysis.AR
private Version matchVersion;
- /**
+ /*
* Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
*/
public ArabicAnalyzer(Version matchVersion)
@@ -122,7 +122,7 @@ namespace Lucene.Net.Analysis.AR
this.matchVersion = matchVersion;
}
- /**
+ /*
* Builds an analyzer with the given stop words.
*/
[Obsolete("Use ArabicAnalyzer(Version, Set) instead")]
@@ -131,7 +131,7 @@ namespace Lucene.Net.Analysis.AR
{
}
- /**
+ /*
* Builds an analyzer with the given stop words.
*/
[Obsolete("Use ArabicAnalyzer(Version, Set) instead")]
@@ -140,7 +140,7 @@ namespace Lucene.Net.Analysis.AR
{
}
- /**
+ /*
* Builds an analyzer with the given stop words. Lines can be commented out using <see cref="STOPWORDS_COMMENT"/>
*/
public ArabicAnalyzer(Version matchVersion, FileInfo stopwords)
@@ -149,7 +149,7 @@ namespace Lucene.Net.Analysis.AR
}
- /**
+ /*
* Creates a <see cref="TokenStream"/> which tokenizes all the text in the provided <see cref="TextReader"/>.
*
* <returns>A <see cref="TokenStream"/> built from an <see cref="ArabicLetterTokenizer"/> filtered with
@@ -174,7 +174,7 @@ namespace Lucene.Net.Analysis.AR
internal TokenStream Result;
};
- /**
+ /*
* Returns a (possibly reused) <see cref="TokenStream"/> which tokenizes all the text
* in the provided <see cref="TextReader"/>.
*
Modified: lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs Fri Oct 5 21:22:51 2012
@@ -24,7 +24,7 @@ using Lucene.Net.Util;
namespace Lucene.Net.Analysis.AR
{
- /**
+ /*
* Tokenizer that breaks text into runs of letters and diacritics.
* <p>
* The problem with the standard Letter tokenizer is that it fails on diacritics.
@@ -50,7 +50,7 @@ namespace Lucene.Net.Analysis.AR
}
- /**
+ /*
* Allows for Letter category or NonspacingMark category
* <see cref="LetterTokenizer.IsTokenChar(char)"/>
*/
Modified: lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs Fri Oct 5 21:22:51 2012
@@ -26,7 +26,7 @@ using Lucene.Net.Util;
namespace Lucene.Net.Analysis.AR
{
- /**
+ /*
* A <see cref="TokenFilter"/> that applies <see cref="ArabicNormalizer"/> to normalize the orthography.
*
*/
Modified: lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/AR/ArabicNormalizer.cs Fri Oct 5 21:22:51 2012
@@ -26,7 +26,7 @@ using Lucene.Net.Util;
namespace Lucene.Net.Analysis.AR
{
- /**
+ /*
* Normalizer for Arabic.
* <p/>
* Normalization is done in-place for efficiency, operating on a termbuffer.
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.AR
public const char SHADDA = '\u0651';
public const char SUKUN = '\u0652';
- /**
+ /*
* Normalize an input buffer of Arabic text
*
* <param name="s">input buffer</param>
@@ -110,7 +110,7 @@ namespace Lucene.Net.Analysis.AR
return len;
}
- /**
+ /*
* Delete a character in-place
*
* <param name="s">Input Buffer</param>
Modified: lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemFilter.cs Fri Oct 5 21:22:51 2012
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.AR
{
- /**
+ /*
* A <see cref="TokenFilter"/> that applies <see cref="ArabicStemmer"/> to stem Arabic words..
*
*/
Modified: lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/AR/ArabicStemmer.cs Fri Oct 5 21:22:51 2012
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.AR
{
- /**
+ /*
* Stemmer for Arabic.
* <p/>
* Stemming is done in-place for efficiency, operating on a termbuffer.
@@ -78,7 +78,7 @@ namespace Lucene.Net.Analysis.AR
};
- /**
+ /*
* Stem an input buffer of Arabic text.
*
* <param name="s">input buffer</param>
@@ -93,7 +93,7 @@ namespace Lucene.Net.Analysis.AR
return len;
}
- /**
+ /*
* Stem a prefix off an Arabic word.
* <param name="s">input buffer</param>
* <param name="len">length of input buffer</param>
@@ -107,7 +107,7 @@ namespace Lucene.Net.Analysis.AR
return len;
}
- /**
+ /*
* Stem suffix(es) off an Arabic word.
* <param name="s">input buffer</param>
* <param name="len">length of input buffer</param>
@@ -121,7 +121,7 @@ namespace Lucene.Net.Analysis.AR
return len;
}
- /**
+ /*
* Returns true if the prefix matches and can be stemmed
* <param name="s">input buffer</param>
* <param name="len">length of input buffer</param>
@@ -148,7 +148,7 @@ namespace Lucene.Net.Analysis.AR
}
}
- /**
+ /*
* Returns true if the suffix matches and can be stemmed
* <param name="s">input buffer</param>
* <param name="len">length of input buffer</param>
@@ -172,7 +172,7 @@ namespace Lucene.Net.Analysis.AR
}
- /**
+ /*
* Delete n characters in-place
*
* <param name="s">Input Buffer</param>
@@ -188,7 +188,7 @@ namespace Lucene.Net.Analysis.AR
return len;
}
- /**
+ /*
* Delete a character in-place
*
* <param name="s">Input Buffer</param>
Modified: lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs Fri Oct 5 21:22:51 2012
@@ -24,7 +24,7 @@ using Lucene.Net.Analysis.Standard;
using System.IO;
using Version = Lucene.Net.Util.Version;
-/**
+/*
* Analyzer for Brazilian language. Supports an external list of stopwords (words that
* will not be indexed at all) and an external list of exclusions (word that will
* not be stemmed, but indexed).
@@ -34,7 +34,7 @@ namespace Lucene.Net.Analysis.BR
{
public sealed class BrazilianAnalyzer : Analyzer
{
- /**
+ /*
* List of typical Brazilian stopwords.
*/
//TODO: Make this private in 3.1
@@ -83,9 +83,9 @@ namespace Lucene.Net.Analysis.BR
private readonly Version matchVersion;
+ // TODO: make this private in 3.1
/// <summary>
/// Contains words that should be indexed but not stemmed.
- // TODO: make this private in 3.1
/// </summary>
private ISet<string> excltable = Support.Compatibility.SetFactory.CreateHashSet<string>();
@@ -94,7 +94,7 @@ namespace Lucene.Net.Analysis.BR
{
}
- /**
+ /*
* Builds an analyzer with the given stop words
*
* @param matchVersion
@@ -109,7 +109,7 @@ namespace Lucene.Net.Analysis.BR
this.matchVersion = matchVersion;
}
- /**
+ /*
* Builds an analyzer with the given stop words and stemming exclusion words
*
* @param matchVersion
@@ -127,7 +127,7 @@ namespace Lucene.Net.Analysis.BR
.Copy(stemExclusionSet));
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
*/
@@ -138,7 +138,7 @@ namespace Lucene.Net.Analysis.BR
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
*/
@@ -149,7 +149,7 @@ namespace Lucene.Net.Analysis.BR
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
*/
@@ -159,7 +159,7 @@ namespace Lucene.Net.Analysis.BR
{
}
- /**
+ /*
* Builds an exclusionlist from an array of Strings.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
*/
@@ -170,7 +170,7 @@ namespace Lucene.Net.Analysis.BR
PreviousTokenStream = null; // force a new stemmer to be created
}
- /**
+ /*
* Builds an exclusionlist from a {@link Map}.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
*/
@@ -181,7 +181,7 @@ namespace Lucene.Net.Analysis.BR
PreviousTokenStream = null; // force a new stemmer to be created
}
- /**
+ /*
* Builds an exclusionlist from the words contained in the given file.
* @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
*/
@@ -192,7 +192,7 @@ namespace Lucene.Net.Analysis.BR
PreviousTokenStream = null; // force a new stemmer to be created
}
- /**
+ /*
* Creates a {@link TokenStream} which tokenizes all the text in the provided {@link Reader}.
*
* @return A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
@@ -216,7 +216,7 @@ namespace Lucene.Net.Analysis.BR
protected internal TokenStream result;
};
- /**
+ /*
* Returns a (possibly reused) {@link TokenStream} which tokenizes all the text
* in the provided {@link Reader}.
*
Modified: lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemFilter.cs Fri Oct 5 21:22:51 2012
@@ -22,7 +22,7 @@ using Lucene.Net.Analysis.Tokenattribute
using Version = Lucene.Net.Util.Version;
-/**
+/*
* Based on GermanStemFilter
*
*/
@@ -32,7 +32,7 @@ namespace Lucene.Net.Analysis.BR
public sealed class BrazilianStemFilter : TokenFilter
{
- /**
+ /*
* The actual token in the input stream.
*/
private BrazilianStemmer stemmer = null;
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.BR
this.exclusions = exclusiontable;
}
- /**
+ /*
* <returns>Returns the next token in the stream, or null at EOS.</returns>
*/
public override bool IncrementToken()
Modified: lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/BR/BrazilianStemmer.cs Fri Oct 5 21:22:51 2012
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-/**
+/*
* A stemmer for Brazilian words.
*/
namespace Lucene.Net.Analysis.BR
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.BR
public class BrazilianStemmer
{
- /**
+ /*
* Changed term
*/
private string TERM;
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.BR
{
}
- /**
+ /*
* Stemms the given term to an unique <tt>discriminator</tt>.
*
* <param name="term"> The term that should be stemmed.</param>
@@ -85,7 +85,7 @@ namespace Lucene.Net.Analysis.BR
return CT;
}
- /**
+ /*
* Checks a term if it can be processed correctly.
*
* <returns> true if, and only if, the given term consists in letters.</returns>
@@ -103,7 +103,7 @@ namespace Lucene.Net.Analysis.BR
return true;
}
- /**
+ /*
* Checks a term if it can be processed indexed.
*
* <returns> true if it can be indexed</returns>
@@ -113,7 +113,7 @@ namespace Lucene.Net.Analysis.BR
return (term.Length < 30) && (term.Length > 2);
}
- /**
+ /*
* See if string is 'a','e','i','o','u'
*
* <returns>true if is vowel</returns>
@@ -127,7 +127,7 @@ namespace Lucene.Net.Analysis.BR
(value == 'u');
}
- /**
+ /*
* Gets R1
*
* R1 - is the region after the first non-vowel follwing a vowel,
@@ -179,7 +179,7 @@ namespace Lucene.Net.Analysis.BR
return value.Substring(j + 1);
}
- /**
+ /*
* Gets RV
*
* RV - IF the second letter is a consoant, RV is the region after
@@ -260,7 +260,7 @@ namespace Lucene.Net.Analysis.BR
return null;
}
- /**
+ /*
* 1) Turn to lowercase
* 2) Remove accents
* 3) ã -> a ; õ -> o
@@ -323,7 +323,7 @@ namespace Lucene.Net.Analysis.BR
return r;
}
- /**
+ /*
* Check if a string ends with a suffix
*
* <returns>true if the string ends with the specified suffix</returns>
@@ -345,7 +345,7 @@ namespace Lucene.Net.Analysis.BR
return value.Substring(value.Length - suffix.Length).Equals(suffix);
}
- /**
+ /*
* Replace a string suffix by another
*
* <returns>the replaced string</returns>
@@ -374,7 +374,7 @@ namespace Lucene.Net.Analysis.BR
}
}
- /**
+ /*
* Remove a string suffix
*
* <returns>the string without the suffix</returns>
@@ -392,7 +392,7 @@ namespace Lucene.Net.Analysis.BR
return value.Substring(0, value.Length - toRemove.Length);
}
- /**
+ /*
* See if a suffix is preceded by a string
*
* <returns>true if the suffix is preceded</returns>
@@ -414,7 +414,7 @@ namespace Lucene.Net.Analysis.BR
- /**
+ /*
* Creates CT (changed term) , substituting * 'ã' and 'õ' for 'a~' and 'o~'.
*/
private void createCT(string term)
@@ -455,7 +455,7 @@ namespace Lucene.Net.Analysis.BR
}
- /**
+ /*
* Standart suffix removal.
* Search for the longest among the following suffixes, and perform
* the following actions:
@@ -665,7 +665,7 @@ namespace Lucene.Net.Analysis.BR
}
- /**
+ /*
* Verb suffixes.
*
* Search for the longest among the following suffixes in RV,
@@ -1168,7 +1168,7 @@ namespace Lucene.Net.Analysis.BR
return false;
}
- /**
+ /*
* Delete suffix 'i' if in RV and preceded by 'c'
*
*/
@@ -1183,7 +1183,7 @@ namespace Lucene.Net.Analysis.BR
}
- /**
+ /*
* Residual suffix
*
* If the word ends with one of the suffixes (os a i o á à ó)
@@ -1213,7 +1213,7 @@ namespace Lucene.Net.Analysis.BR
}
- /**
+ /*
* If the word ends with one of ( e é ê) in RV,delete it,
* and if preceded by 'gu' (or 'ci') with the 'u' (or 'i') in RV,
* delete the 'u' (or 'i')
@@ -1245,7 +1245,7 @@ namespace Lucene.Net.Analysis.BR
}
}
- /**
+ /*
* For log and debug purpose
*
* <returns> TERM, CT, RV, R1 and R2</returns>
Modified: lucene.net/trunk/src/contrib/Analyzers/CJK/CJKAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/CJK/CJKAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/CJK/CJKAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/CJK/CJKAnalyzer.cs Fri Oct 5 21:22:51 2012
@@ -123,7 +123,7 @@ namespace Lucene.Net.Analysis.CJK
protected internal TokenStream result;
};
- /**
+ /*
* Returns a (possibly reused) {@link TokenStream} which tokenizes all the text
* in the provided {@link Reader}.
*
Modified: lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/CJK/CJKTokenizer.cs Fri Oct 5 21:22:51 2012
@@ -157,7 +157,7 @@ namespace Lucene.Net.Analysis.CJK
//~ Methods ----------------------------------------------------------------
- /**
+ /*
* Returns true for the next token in the stream, or false at EOS.
* See http://java.sun.com/j2se/1.3/docs/api/java/lang/char.UnicodeBlock.html
* for detail.
@@ -175,7 +175,7 @@ namespace Lucene.Net.Analysis.CJK
public override bool IncrementToken()
{
ClearAttributes();
- /** how many character(s) has been stored in buffer */
+ /* how many character(s) has been stored in buffer */
while (true)
{
@@ -183,13 +183,13 @@ namespace Lucene.Net.Analysis.CJK
int length = 0;
- /** the position used to create Token */
+ /* the position used to create Token */
int start = offset;
while (true)
{
// loop until we've found a full token
- /** current character */
+ /* current character */
char c;
offset++;
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs Fri Oct 5 21:22:51 2012
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
@@ -22,22 +22,22 @@ using Lucene.Net.Analysis.Tokenattribute
namespace Lucene.Net.Analysis.Compound
{
- /**
+ /*
* Base class for decomposition token filters.
*/
public abstract class CompoundWordTokenFilterBase : TokenFilter
{
- /**
+ /*
* The default for minimal word length that gets decomposed
*/
public static readonly int DEFAULT_MIN_WORD_SIZE = 5;
- /**
+ /*
* The default for minimal length of subwords that get propagated to the output of this filter
*/
public static readonly int DEFAULT_MIN_SUBWORD_SIZE = 2;
- /**
+ /*
* The default for maximal length of subwords that get propagated to the output of this filter
*/
public static readonly int DEFAULT_MAX_SUBWORD_SIZE = 15;
@@ -115,7 +115,7 @@ namespace Lucene.Net.Analysis.Compound
payloadAtt = AddAttribute<IPayloadAttribute>();
}
- /**
+ /*
* Create a set of words from an array
* The resulting Set does case insensitive matching
* TODO We should look for a faster dictionary lookup approach.
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/DictionaryCompoundWordTokenFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/DictionaryCompoundWordTokenFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/DictionaryCompoundWordTokenFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/DictionaryCompoundWordTokenFilter.cs Fri Oct 5 21:22:51 2012
@@ -24,7 +24,7 @@ using System.Collections.Generic;
namespace Lucene.Net.Analysis.Compound
{
- /**
+ /*
* A {@link TokenFilter} that decomposes compound words found in many Germanic languages.
* <p>
* "Donaudampfschiff" becomes Donau, dampf, schiff so that you can find
@@ -34,7 +34,7 @@ namespace Lucene.Net.Analysis.Compound
*/
public class DictionaryCompoundWordTokenFilter : CompoundWordTokenFilterBase
{
- /**
+ /*
*
* @param input the {@link TokenStream} to process
* @param dictionary the word dictionary to match against
@@ -50,7 +50,7 @@ namespace Lucene.Net.Analysis.Compound
}
- /**
+ /*
*
* @param input the {@link TokenStream} to process
* @param dictionary the word dictionary to match against
@@ -61,7 +61,7 @@ namespace Lucene.Net.Analysis.Compound
}
- /**
+ /*
*
* @param input the {@link TokenStream} to process
* @param dictionary the word dictionary to match against. If this is a {@link org.apache.lucene.analysis.CharArraySet CharArraySet} it must have set ignoreCase=false and only contain
@@ -73,7 +73,7 @@ namespace Lucene.Net.Analysis.Compound
}
- /**
+ /*
*
* @param input the {@link TokenStream} to process
* @param dictionary the word dictionary to match against. If this is a {@link org.apache.lucene.analysis.CharArraySet CharArraySet} it must have set ignoreCase=false and only contain
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs Fri Oct 5 21:22:51 2012
@@ -1,19 +1,19 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one or more
-// * contributor license agreements. See the NOTICE file distributed with
-// * this work for additional information regarding copyright ownership.
-// * The ASF licenses this file to You under the Apache License, Version 2.0
-// * (the "License"); you may not use this file except in compliance with
-// * the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing, software
-// * distributed under the License is distributed on an "AS IS" BASIS,
-// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and
-// * limitations under the License.
-// */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
//using System;
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs Fri Oct 5 21:22:51 2012
@@ -1,19 +1,19 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one or more
-// * contributor license agreements. See the NOTICE file distributed with
-// * this work for additional information regarding copyright ownership.
-// * The ASF licenses this file to You under the Apache License, Version 2.0
-// * (the "License"); you may not use this file except in compliance with
-// * the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing, software
-// * distributed under the License is distributed on an "AS IS" BASIS,
-// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and
-// * limitations under the License.
-// */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
//using System;
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs Fri Oct 5 21:22:51 2012
@@ -1,19 +1,19 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one or more
-// * contributor license agreements. See the NOTICE file distributed with
-// * this work for additional information regarding copyright ownership.
-// * The ASF licenses this file to You under the Apache License, Version 2.0
-// * (the "License"); you may not use this file except in compliance with
-// * the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing, software
-// * distributed under the License is distributed on an "AS IS" BASIS,
-// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and
-// * limitations under the License.
-// */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
//using System;
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs Fri Oct 5 21:22:51 2012
@@ -1,19 +1,19 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one or more
-// * contributor license agreements. See the NOTICE file distributed with
-// * this work for additional information regarding copyright ownership.
-// * The ASF licenses this file to You under the Apache License, Version 2.0
-// * (the "License"); you may not use this file except in compliance with
-// * the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing, software
-// * distributed under the License is distributed on an "AS IS" BASIS,
-// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and
-// * limitations under the License.
-// */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
//namespace Lucene.Net.Analysis.Compound.Hyphenation
//{
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs Fri Oct 5 21:22:51 2012
@@ -1,19 +1,19 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one or more
-// * contributor license agreements. See the NOTICE file distributed with
-// * this work for additional information regarding copyright ownership.
-// * The ASF licenses this file to You under the Apache License, Version 2.0
-// * (the "License"); you may not use this file except in compliance with
-// * the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing, software
-// * distributed under the License is distributed on an "AS IS" BASIS,
-// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and
-// * limitations under the License.
-// */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
//using System;
//using System.Runtime.Serialization;
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs Fri Oct 5 21:22:51 2012
@@ -24,7 +24,7 @@
//namespace Lucene.Net.Analysis.Compound.Hyphenation
//{
-////**
+////*
// * This tree structure stores the hyphenation patterns in an efficient way for
// * fast lookup. It provides the provides the method to hyphenate a word.
// *
@@ -36,22 +36,22 @@
// private static readonly long serialVersionUID = -7842107987915665573L;
-// /**
+// /*
// * value space: stores the interletter values
// */
// protected ByteVector vspace;
-// /**
+// /*
// * This map stores hyphenation exceptions
// */
// protected HashMap<String,ArrayList> stoplist;
-// /**
+// /*
// * This map stores the character classes
// */
// protected TernaryTree classmap;
-// /**
+// /*
// * Temporary map to store interletter values on pattern loading.
// */
// [NonSerialized]
@@ -64,7 +64,7 @@
// vspace.Alloc(1); // this reserves index 0, which we don't use
// }
-// /**
+// /*
// * Packs the values by storing them in 4 bits, two values into a byte Values
// * range is from 0 to 9. We use zero as terminator, so we'll add 1 to the
// * value.
@@ -108,7 +108,7 @@
// return buf.ToString();
// }
-// /**
+// /*
// * Read hyphenation patterns from an XML file.
// *
// * @param f the filename
@@ -125,7 +125,7 @@
// }
// }
-// /**
+// /*
// * Read hyphenation patterns from an XML file.
// *
// * @param source the InputSource for the file
@@ -156,7 +156,7 @@
// return "";
// }
-// /**
+// /*
// * String compare, returns 0 if equal or t is a substring of s
// */
// protected int hstrcmp(char[] s, int si, char[] t, int ti) {
@@ -192,7 +192,7 @@
// return res;
// }
-// /**
+// /*
// * <p>
// * Search for all possible partial matches of word starting at index an update
// * interletter values. In other words, it does something like:
@@ -267,7 +267,7 @@
// } else {
// q = lo[q];
-// /**
+// /*
// * actually the code should be: q = sc[q] < 0 ? hi[q] : lo[q]; but
// * java chars are unsigned
// */
@@ -279,7 +279,7 @@
// }
// }
-// /**
+// /*
// * Hyphenate word and return a Hyphenation object.
// *
// * @param word the word to be hyphenated
@@ -296,7 +296,7 @@
// return hyphenate(w, 0, w.length, remainCharCount, pushCharCount);
// }
-// /**
+// /*
// * w = "****nnllllllnnn*****", where n is a non-letter, l is a letter, all n
// * may be absent, the first n is at offset, the first l is at offset +
// * iIgnoreAtBeginning; word = ".llllll.'\0'***", where all l in w are copied
@@ -310,7 +310,7 @@
// * iIgnoreAtBeginning
// */
-// /**
+// /*
// * Hyphenate word and return an array of hyphenation points.
// *
// * @param w char array that contains the word
@@ -415,7 +415,7 @@
// }
// }
-// /**
+// /*
// * Add a character class to the tree. It is used by
// * {@link PatternParser PatternParser} as callback to add character classes.
// * Character classes define the valid word characters for hyphenation. If a
@@ -437,7 +437,7 @@
// }
// }
-// /**
+// /*
// * Add an exception to the tree. It is used by
// * {@link PatternParser PatternParser} class as callback to store the
// * hyphenation exceptions.
@@ -450,7 +450,7 @@
// stoplist.Add(word, hyphenatedword);
// }
-// /**
+// /*
// * Add a pattern to the tree. Mainly, to be used by
// * {@link PatternParser PatternParser} class as callback to add a pattern to
// * the tree.
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs Fri Oct 5 21:22:51 2012
@@ -1,26 +1,26 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one or more
-// * contributor license agreements. See the NOTICE file distributed with
-// * this work for additional information regarding copyright ownership.
-// * The ASF licenses this file to You under the Apache License, Version 2.0
-// * (the "License"); you may not use this file except in compliance with
-// * the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing, software
-// * distributed under the License is distributed on an "AS IS" BASIS,
-// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and
-// * limitations under the License.
-// */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
//using System;
//using System.Collections;
//namespace Lucene.Net.Analyzers.Compound.Hyphenation
//{
-///**
+// /*
// * This interface is used to connect the XML pattern file parser to the
// * hyphenation tree.
// *
@@ -28,7 +28,7 @@
// */
//public interface PatternConsumer {
-// /**
+// /*
// * Add a character class. A character class defines characters that are
// * considered equivalent for the purpose of hyphenation (e.g. "aA"). It
// * usually means to ignore case.
@@ -37,7 +37,7 @@
// */
// void AddClass(string chargroup);
-// /**
+// /*
// * Add a hyphenation exception. An exception replaces the result obtained by
// * the algorithm for cases for which this fails or the user wants to provide
// * his own hyphenation. A hyphenatedword is a vector of alternating String's
@@ -45,7 +45,7 @@
// */
// void AddException(string word, ArrayList hyphenatedword);
-// /**
+// /*
// * Add hyphenation patterns.
// *
// * @param pattern the pattern
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs Fri Oct 5 21:22:51 2012
@@ -8,7 +8,7 @@
//namespace Lucene.Net.Analyzers.Compound.Hyphenation
//{
-///**
+// /*
// * A SAX document handler to read and parse hyphenation patterns from a XML
// * file.
// *
@@ -59,7 +59,7 @@
// this.consumer = consumer;
// }
-// /**
+// /*
// * Parses a hyphenation pattern file.
// *
// * @param filename the filename
@@ -70,7 +70,7 @@
// parse(new FileInfo(filename));
// }
-// /**
+// /*
// * Parses a hyphenation pattern file.
// *
// * @param file the pattern file
@@ -87,7 +87,7 @@
// }
// }
-// /**
+// /*
// * Parses a hyphenation pattern file.
// *
// * @param source the InputSource for the file
@@ -106,7 +106,7 @@
// }
// }
-// /**
+// /*
// * Creates a SAX parser using JAXP
// *
// * @return the created SAX parser
@@ -250,7 +250,7 @@
// // ContentHandler methods
// //
-// /**
+// /*
// * @see org.xml.sax.ContentHandler#startElement(java.lang.String,
// * java.lang.String, java.lang.String, org.xml.sax.Attributes)
// */
@@ -279,7 +279,7 @@
// token.SetLength(0);
// }
-// /**
+// /*
// * @see org.xml.sax.ContentHandler#endElement(java.lang.String,
// * java.lang.String, java.lang.String)
// */
@@ -316,7 +316,7 @@
// }
-// /**
+// /*
// * @see org.xml.sax.ContentHandler#chars(char[], int, int)
// */
// public override void chars(char ch[], int start, int Length) {
@@ -349,21 +349,21 @@
// // ErrorHandler methods
// //
-// /**
+// /*
// * @see org.xml.sax.ErrorHandler#warning(org.xml.sax.SAXParseException)
// */
// public override void warning(SAXParseException ex) {
// errMsg = "[Warning] " + getLocationString(ex) + ": " + ex.GetMessage();
// }
-// /**
+// /*
// * @see org.xml.sax.ErrorHandler#error(org.xml.sax.SAXParseException)
// */
// public override void error(SAXParseException ex) {
// errMsg = "[Error] " + getLocationString(ex) + ": " + ex.GetMessage();
// }
-// /**
+// /*
// * @see org.xml.sax.ErrorHandler#fatalError(org.xml.sax.SAXParseException)
// */
// public override void fatalError(SAXParseException ex) throws SAXException {
@@ -371,7 +371,7 @@
// throw ex;
// }
-// /**
+// /*
// * Returns a string of the location.
// */
// private String getLocationString(SAXParseException ex) {
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs Fri Oct 5 21:22:51 2012
@@ -1,19 +1,19 @@
-///*
-// * Licensed to the Apache Software Foundation (ASF) under one or more
-// * contributor license agreements. See the NOTICE file distributed with
-// * this work for additional information regarding copyright ownership.
-// * The ASF licenses this file to You under the Apache License, Version 2.0
-// * (the "License"); you may not use this file except in compliance with
-// * the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing, software
-// * distributed under the License is distributed on an "AS IS" BASIS,
-// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and
-// * limitations under the License.
-// */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
//using System;
//using System.Collections;
@@ -21,7 +21,7 @@
//namespace Lucene.Net.Analysis.Compound.Hyphenation
//{
-// /**
+// /*
// * <h2>Ternary Search Tree.</h2>
// *
// * <p>
@@ -66,7 +66,7 @@
// public class TernaryTree : ICloneable
// {
-// /**
+// /*
// * We use 4 arrays to represent a node. I guess I should have created a proper
// * node class, but somehow Knuth's pascal code made me forget we now have a
// * portable language with virtual memory management and automatic garbage
@@ -74,23 +74,23 @@
// * fix it.
// */
-// /**
+// /*
// * Pointer to low branch and to rest of the key when it is stored directly in
// * this node, we don't have unions in java!
// */
// protected char[] lo;
-// /**
+// /*
// * Pointer to high branch.
// */
// protected char[] hi;
-// /**
+// /*
// * Pointer to equal branch and to data when this node is a string terminator.
// */
// protected char[] eq;
-// /**
+// /*
// * <P>
// * The character stored in this node: splitchar. Two special values are
// * reserved:
@@ -106,7 +106,7 @@
// */
// protected char[] sc;
-// /**
+// /*
// * This vector holds the trailing of the keys when the branch is compressed.
// */
// protected CharVector kv;
@@ -136,7 +136,7 @@
// kv = new CharVector();
// }
-// /**
+// /*
// * Branches are initially compressed, needing one node per key plus the size
// * of the string key. They are decompressed as needed when another key with
// * same prefix is inserted. This saves a lot of space, specially for long
@@ -166,7 +166,7 @@
// root = insert(root, key, start, val);
// }
-// /**
+// /*
// * The actual insertion function, recursive version.
// */
// private char insert(char p, char[] key, int start, char val)
@@ -258,7 +258,7 @@
// return p;
// }
-// /**
+// /*
// * Compares 2 null terminated char arrays
// */
// public static int strcmp(char[] a, int startA, char[] b, int startB)
@@ -273,7 +273,7 @@
// return a[startA] - b[startB];
// }
-// /**
+// /*
// * Compares a string with null terminated char array
// */
// public static int strcmp(String str, char[] a, int start)
@@ -419,7 +419,7 @@
// return t;
// }
-// /**
+// /*
// * Recursively insert the median first and then the median of the lower and
// * upper halves, and so on in order to get a balanced tree. The array of keys
// * is assumed to be sorted in ascending order.
@@ -439,7 +439,7 @@
// insertBalanced(k, v, offset + m + 1, n - m - 1);
// }
-// /**
+// /*
// * Balance the tree for best search performance
// */
// public void balance()
@@ -464,7 +464,7 @@
// // System.out.println(sc[root]);
// }
-// /**
+// /*
// * Each node stores a character (splitchar) which is part of some key(s). In a
// * compressed branch (one that only contain a single string key) the trailer
// * of the key which is not already in nodes is stored externally in the kv
@@ -529,12 +529,12 @@
// public class Iterator : IEnumerator
// {
-// /**
+// /*
// * current node index
// */
// int cur;
-// /**
+// /*
// * current key
// */
// String curkey;
@@ -564,12 +564,12 @@
// }
-// /**
+// /*
// * Node stack
// */
// Stack ns;
-// /**
+// /*
// * key stack implemented with a StringBuilder
// */
// StringBuilder ks;
@@ -612,7 +612,7 @@
// return (cur != -1);
// }
-// /**
+// /*
// * traverse upwards
// */
// private int up()
@@ -676,7 +676,7 @@
// return res;
// }
-// /**
+// /*
// * traverse the tree to find next key
// */
// private int Run()
Modified: lucene.net/trunk/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs Fri Oct 5 21:22:51 2012
@@ -1,19 +1,19 @@
-///**
-// * Licensed to the Apache Software Foundation (ASF) under one or more
-// * contributor license agreements. See the NOTICE file distributed with
-// * this work for additional information regarding copyright ownership.
-// * The ASF licenses this file to You under the Apache License, Version 2.0
-// * (the "License"); you may not use this file except in compliance with
-// * the License. You may obtain a copy of the License at
-// *
-// * http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing, software
-// * distributed under the License is distributed on an "AS IS" BASIS,
-// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// * See the License for the specific language governing permissions and
-// * limitations under the License.
-// */
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
//using System;
//using System.Collections.Generic;
@@ -23,7 +23,7 @@
//namespace Lucene.Net.Analysis.Compound
//{
-// /**
+// /*
// * A {@link TokenFilter} that decomposes compound words found in many Germanic languages.
// * <p>
// * "Donaudampfschiff" becomes Donau, dampf, schiff so that you can find
@@ -35,7 +35,7 @@
//{
// private HyphenationTree hyphenator;
-// /**
+// /*
// *
// * @param input the {@link TokenStream} to process
// * @param hyphenator the hyphenation pattern tree to use for hyphenation
@@ -52,7 +52,7 @@
// {
// }
-// /**
+// /*
// *
// * @param input the {@link TokenStream} to process
// * @param hyphenator the hyphenation pattern tree to use for hyphenation
@@ -65,7 +65,7 @@
// }
-// /**
+// /*
// *
// * @param input the {@link TokenStream} to process
// * @param hyphenator the hyphenation pattern tree to use for hyphenation
@@ -79,7 +79,7 @@
// }
-// /**
+// /*
// *
// * @param input the {@link TokenStream} to process
// * @param hyphenator the hyphenation pattern tree to use for hyphenation
@@ -103,7 +103,7 @@
// this.hyphenator = hyphenator;
// }
-// /**
+// /*
// * Create a hyphenator tree
// *
// * @param hyphenationFilename the filename of the XML grammar to load
@@ -115,7 +115,7 @@
// return GetHyphenationTree(new InputSource(hyphenationFilename));
// }
-// /**
+// /*
// * Create a hyphenator tree
// *
// * @param hyphenationFile the file of the XML grammar to load
@@ -127,7 +127,7 @@
// return GetHyphenationTree(new InputSource(hyphenationFile.toURL().toExternalForm()));
// }
-// /**
+// /*
// * Create a hyphenator tree
// *
// * @param hyphenationReader the reader of the XML grammar to load from
@@ -144,7 +144,7 @@
// return getHyphenationTree(is);
// }
-// /**
+// /*
// * Create a hyphenator tree
// *
// * @param hyphenationSource the InputSource pointing to the XML grammar
Modified: lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Cz/CzechAnalyzer.cs Fri Oct 5 21:22:51 2012
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
@@ -28,7 +28,7 @@ using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analysis.Cz
{
-/**
+/*
* {@link Analyzer} for Czech language.
* <p>
* Supports an external list of stopwords (words that
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Cz
*/
public sealed class CzechAnalyzer : Analyzer {
- /**
+ /*
* List of typical stopwords.
* @deprecated use {@link #getDefaultStopSet()} instead
*/
@@ -67,7 +67,7 @@ public sealed class CzechAnalyzer : Anal
"jeho\u017e","j\u00ed\u017e","jeliko\u017e","je\u017e","jako\u017e","na\u010de\u017e",
};
- /**
+ /*
* Returns a set of default Czech-stopwords
* @return a set of default Czech-stopwords
*/
@@ -80,14 +80,14 @@ public sealed class CzechAnalyzer : Anal
(IEnumerable<string>)CZECH_STOP_WORDS, false));
}
- /**
+ /*
* Contains the stopwords used with the {@link StopFilter}.
*/
// TODO make this final in 3.1
private ISet<string> stoptable;
private readonly Version matchVersion;
- /**
+ /*
* Builds an analyzer with the default stop words ({@link #CZECH_STOP_WORDS}).
*/
public CzechAnalyzer(Version matchVersion)
@@ -96,7 +96,7 @@ public sealed class CzechAnalyzer : Anal
}
- /**
+ /*
* Builds an analyzer with the given stop words and stemming exclusion words
*
* @param matchVersion
@@ -110,7 +110,7 @@ public sealed class CzechAnalyzer : Anal
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
*/
@@ -120,7 +120,7 @@ public sealed class CzechAnalyzer : Anal
}
- /**
+ /*
* Builds an analyzer with the given stop words.
*
* @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
@@ -131,7 +131,7 @@ public sealed class CzechAnalyzer : Anal
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
*/
@@ -141,7 +141,7 @@ public sealed class CzechAnalyzer : Anal
}
- /**
+ /*
* Loads stopwords hash from resource stream (file, database...).
* @param wordfile File containing the wordlist
* @param encoding Encoding used (win-1250, iso-8859-2, ...), null for default system encoding
@@ -173,7 +173,7 @@ public sealed class CzechAnalyzer : Anal
}
}
- /**
+ /*
* Creates a {@link TokenStream} which tokenizes all the text in the provided {@link Reader}.
*
* @return A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
@@ -193,7 +193,7 @@ public sealed class CzechAnalyzer : Anal
protected internal TokenStream result;
};
- /**
+ /*
* Returns a (possibly reused) {@link TokenStream} which tokenizes all the text in
* the provided {@link Reader}.
*
Modified: lucene.net/trunk/src/contrib/Analyzers/El/GreekAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/El/GreekAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/El/GreekAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/El/GreekAnalyzer.cs Fri Oct 5 21:22:51 2012
@@ -28,7 +28,7 @@ using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analysis.El
{
- /**
+ /*
* {@link Analyzer} for the Greek language.
* <p>
* Supports an external list of stopwords (words
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.El
*/
public sealed class GreekAnalyzer : Analyzer
{
- /**
+ /*
* List of typical Greek stopwords.
*/
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.El
"ισωσ", "οσο", "οτι"
};
- /**
+ /*
* Returns a set of default Greek-stopwords
* @return a set of default Greek-stopwords
*/
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.El
internal static ISet<string> DEFAULT_SET = CharArraySet.UnmodifiableSet(new CharArraySet((IEnumerable<string>)GREEK_STOP_WORDS, false));
}
- /**
+ /*
* Contains the stopwords used with the {@link StopFilter}.
*/
private readonly ISet<string> stopSet;
@@ -91,7 +91,7 @@ namespace Lucene.Net.Analysis.El
{
}
- /**
+ /*
* Builds an analyzer with the given stop words
*
* @param matchVersion
@@ -105,7 +105,7 @@ namespace Lucene.Net.Analysis.El
this.matchVersion = matchVersion;
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @param stopwords Array of stopwords to use.
* @deprecated use {@link #GreekAnalyzer(Version, Set)} instead
@@ -115,7 +115,7 @@ namespace Lucene.Net.Analysis.El
{
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @deprecated use {@link #GreekAnalyzer(Version, Set)} instead
*/
@@ -124,7 +124,7 @@ namespace Lucene.Net.Analysis.El
{
}
- /**
+ /*
* Creates a {@link TokenStream} which tokenizes all the text in the provided {@link Reader}.
*
* @return A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
@@ -145,7 +145,7 @@ namespace Lucene.Net.Analysis.El
protected internal TokenStream result;
};
- /**
+ /*
* Returns a (possibly reused) {@link TokenStream} which tokenizes all the text
* in the provided {@link Reader}.
*
Modified: lucene.net/trunk/src/contrib/Analyzers/El/GreekLowerCaseFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/El/GreekLowerCaseFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/El/GreekLowerCaseFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/El/GreekLowerCaseFilter.cs Fri Oct 5 21:22:51 2012
@@ -23,7 +23,7 @@ using Lucene.Net.Analysis.Tokenattribute
namespace Lucene.Net.Analysis.El
{
- /**
+ /*
* Normalizes token text to lower case, removes some Greek diacritics,
* and standardizes final sigma to sigma.
*
Modified: lucene.net/trunk/src/contrib/Analyzers/Fa/PersianAnalyzer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Fa/PersianAnalyzer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Fa/PersianAnalyzer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Fa/PersianAnalyzer.cs Fri Oct 5 21:22:51 2012
@@ -28,7 +28,7 @@ using Version = Lucene.Net.Util.Version;
namespace Lucene.Net.Analysis.Fa
{
- /**
+ /*
* {@link Analyzer} for Persian.
* <p>
* This Analyzer uses {@link ArabicLetterTokenizer} which implies tokenizing around
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Fa
public sealed class PersianAnalyzer : Analyzer
{
- /**
+ /*
* File containing default Persian stopwords.
*
* Default stopword list is from
@@ -49,18 +49,18 @@ namespace Lucene.Net.Analysis.Fa
*/
public readonly static String DEFAULT_STOPWORD_FILE = "stopwords.txt";
- /**
+ /*
* Contains the stopwords used with the StopFilter.
*/
private readonly ISet<string> stoptable;
- /**
+ /*
* The comment character in the stopwords file. All lines prefixed with this
* will be ignored
*/
public static readonly String STOPWORDS_COMMENT = "#";
- /**
+ /*
* Returns an unmodifiable instance of the default stop-words set.
* @return an unmodifiable instance of the default stop-words set.
*/
@@ -69,7 +69,7 @@ namespace Lucene.Net.Analysis.Fa
return DefaultSetHolder.DEFAULT_STOP_SET;
}
- /**
+ /*
* Atomically loads the DEFAULT_STOP_SET in a lazy fashion once the outer class
* accesses the static final set the first time.;
*/
@@ -110,7 +110,7 @@ namespace Lucene.Net.Analysis.Fa
private readonly Version matchVersion;
- /**
+ /*
* Builds an analyzer with the default stop words:
* {@link #DEFAULT_STOPWORD_FILE}.
*/
@@ -120,7 +120,7 @@ namespace Lucene.Net.Analysis.Fa
}
- /**
+ /*
* Builds an analyzer with the given stop words
*
* @param matchVersion
@@ -134,7 +134,7 @@ namespace Lucene.Net.Analysis.Fa
this.matchVersion = matchVersion;
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @deprecated use {@link #PersianAnalyzer(Version, Set)} instead
*/
@@ -144,7 +144,7 @@ namespace Lucene.Net.Analysis.Fa
}
- /**
+ /*
* Builds an analyzer with the given stop words.
* @deprecated use {@link #PersianAnalyzer(Version, Set)} instead
*/
@@ -154,7 +154,7 @@ namespace Lucene.Net.Analysis.Fa
}
- /**
+ /*
* Builds an analyzer with the given stop words. Lines can be commented out
* using {@link #STOPWORDS_COMMENT}
* @deprecated use {@link #PersianAnalyzer(Version, Set)} instead
@@ -165,7 +165,7 @@ namespace Lucene.Net.Analysis.Fa
}
- /**
+ /*
* Creates a {@link TokenStream} which tokenizes all the text in the provided
* {@link Reader}.
*
@@ -196,7 +196,7 @@ namespace Lucene.Net.Analysis.Fa
protected internal TokenStream result;
}
- /**
+ /*
* Returns a (possibly reused) {@link TokenStream} which tokenizes all the text
* in the provided {@link Reader}.
*
Modified: lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizationFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizationFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizationFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizationFilter.cs Fri Oct 5 21:22:51 2012
@@ -23,7 +23,7 @@ using Lucene.Net.Analysis.Tokenattribute
namespace Lucene.Net.Analysis.Fa
{
- /**
+ /*
* A {@link TokenFilter} that applies {@link PersianNormalizer} to normalize the
* orthography.
*
Modified: lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizer.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizer.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizer.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Fa/PersianNormalizer.cs Fri Oct 5 21:22:51 2012
@@ -23,7 +23,7 @@ using System;
namespace Lucene.Net.Analysis.Fa
{
-/**
+/*
* Normalizer for Persian.
* <p>
* Normalization is done in-place for efficiency, operating on a termbuffer.
@@ -55,7 +55,7 @@ public class PersianNormalizer {
public const char HEH = '\u0647';
- /**
+ /*
* Normalize an input buffer of Persian text
*
* @param s input buffer
@@ -89,7 +89,7 @@ public class PersianNormalizer {
return len;
}
- /**
+ /*
* Delete a character in-place
*
* @param s Input Buffer
Modified: lucene.net/trunk/src/contrib/Analyzers/Fr/ElisionFilter.cs
URL: http://svn.apache.org/viewvc/lucene.net/trunk/src/contrib/Analyzers/Fr/ElisionFilter.cs?rev=1394820&r1=1394819&r2=1394820&view=diff
==============================================================================
--- lucene.net/trunk/src/contrib/Analyzers/Fr/ElisionFilter.cs (original)
+++ lucene.net/trunk/src/contrib/Analyzers/Fr/ElisionFilter.cs Fri Oct 5 21:22:51 2012
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
@@ -21,7 +21,7 @@ using Lucene.Net.Analysis.Tokenattribute
namespace Lucene.Net.Analysis.Fr
{
- /**
+ /*
* Removes elisions from a {@link TokenStream}. For example, "l'avion" (the plane) will be
* tokenized as "avion" (plane).
* <p>
@@ -44,14 +44,14 @@ namespace Lucene.Net.Analysis.Fr
this.articles = new CharArraySet(articles, true);
}
- /**
+ /*
* Constructs an elision filter with standard stop words
*/
internal ElisionFilter(TokenStream input)
: this(input, new[] { "l", "m", "t", "qu", "n", "s", "j" })
{ }
- /**
+ /*
* Constructs an elision filter with a Set of stop words
*/
public ElisionFilter(TokenStream input, ISet<string> articles)
@@ -61,7 +61,7 @@ namespace Lucene.Net.Analysis.Fr
termAtt = AddAttribute<ITermAttribute>();
}
- /**
+ /*
* Constructs an elision filter with an array of stop words
*/
public ElisionFilter(TokenStream input, IEnumerable<string> articles)
@@ -71,7 +71,7 @@ namespace Lucene.Net.Analysis.Fr
termAtt = AddAttribute<ITermAttribute>();
}
- /**
+ /*
* Increments the {@link TokenStream} with a {@link TermAttribute} without elisioned start
*/
public override sealed bool IncrementToken()