Posted to commits@lucene.apache.org by rm...@apache.org on 2015/02/07 11:10:37 UTC

svn commit: r1658040 [1/4] - in /lucene/dev/trunk/lucene: ./ analysis/common/src/java/org/apache/lucene/analysis/bg/ analysis/common/src/java/org/apache/lucene/analysis/charfilter/ analysis/common/src/java/org/apache/lucene/analysis/cjk/ analysis/commo...

Author: rmuir
Date: Sat Feb  7 10:10:34 2015
New Revision: 1658040

URL: http://svn.apache.org/r1658040
Log:
LUCENE-4797: enable doclint html verification
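
For context, "doclint html verification" refers to the javadoc doclint HTML checks (the -Xdoclint:html option available since JDK 8), which make the javadoc tool fail on malformed Javadoc HTML such as the self-closing <p/> and <br /> tags and stray </p> closers removed throughout this patch. The actual build change lives in lucene/common-build.xml (listed among the modified files below) but is not reproduced in this excerpt; the fragment that follows is only a hypothetical sketch of how such a check can be wired into an Ant javadoc call, using Ant's standard javadoc task attributes rather than Lucene's own build macros:

    <!-- Hypothetical Ant fragment (not Lucene's build code): run javadoc with
         doclint HTML verification enabled, so invalid HTML in Javadoc comments
         fails the documentation build. -->
    <javadoc destdir="build/docs"
             sourcepath="src/java"
             packagenames="org.apache.lucene.*"
             additionalparam="-Xdoclint:html"/>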

Modified:
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/package-info.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/package-info.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAndSuffixAwareTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAwareTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/PayloadEncoder.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java
    lucene/dev/trunk/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java
    lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java
    lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java
    lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java
    lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java
    lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java
    lucene/dev/trunk/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java
    lucene/dev/trunk/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/BaseUIMATokenizer.java
    lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
    lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java
    lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java
    lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewCollationAnalyzerTask.java
    lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewLocaleTask.java
    lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
    lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravTask.java
    lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java
    lucene/dev/trunk/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java
    lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/FuzzySet.java
    lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java
    lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java
    lucene/dev/trunk/lucene/common-build.xml
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/Token.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/package-info.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexWriter.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50LiveDocsFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50NormsFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SegmentInfoFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50StoredFieldsFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50TermVectorsFormat.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/package-info.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/package-info.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/document/Field.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/document/LongField.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/package-info.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSet.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/Sort.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/TermRangeTermsEnum.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/package-info.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/payloads/AveragePayloadFunction.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/payloads/MaxPayloadFunction.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/payloads/package-info.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/similarities/IBSimilarity.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/similarities/package-info.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/spans/SpanFirstQuery.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/spans/Spans.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/search/spans/package-info.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/store/DataOutput.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/store/Directory.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/util/SentinelIntSet.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/util/TimSorter.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/util/WeakIdentityMap.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/util/packed/PackedInts.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/util/packed/package-info.java
    lucene/dev/trunk/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java
    lucene/dev/trunk/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java
    lucene/dev/trunk/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
    lucene/dev/trunk/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java
    lucene/dev/trunk/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
    lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
    lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java
    lucene/dev/trunk/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
    lucene/dev/trunk/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
    lucene/dev/trunk/lucene/misc/src/java/org/apache/lucene/store/WindowsDirectory.java
    lucene/dev/trunk/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/ExtendableQueryParser.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/QueryParserHelper.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/BooleanQueryNodeBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardBooleanQueryNodeBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/StandardQueryTreeBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/StandardQueryConfigHandler.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/BooleanModifierNode.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/StandardBooleanQueryNode.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AllowLeadingWildcardProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanSingleChildOptimizationQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BoostQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/DefaultPhraseSlopQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/FuzzyQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/LowercaseExpandedTermsQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/NumericQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/PhraseSlopQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/StandardQueryNodeProcessorPipeline.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/TermRangeQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/WildcardQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeFilterBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/NumericRangeQueryBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpanOrQueryNodeBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryConfigHandler.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansQueryTreeBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/SpansValidatorQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParser.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/TestSpanQueryParserSimpleSample.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttribute.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldAttributeImpl.java
    lucene/dev/trunk/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/spans/UniqueFieldQueryNodeProcessor.java
    lucene/dev/trunk/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxOverlapRatioValueSource.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxSimilarityValueSource.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractVisitingPrefixTreeFilter.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/NumberRangePrefixTree.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java
    lucene/dev/trunk/lucene/spatial/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
    lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/spell/PlainTextDictionary.java
    lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
    lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/spell/SuggestWord.java
    lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/suggest/FileDictionary.java
    lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java
    lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/mockfile/package-info.java

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java Sat Feb  7 10:10:34 2015
@@ -37,7 +37,6 @@ import org.apache.lucene.analysis.util.S
  * This analyzer implements light-stemming as specified by: <i> Searching
  * Strategies for the Bulgarian Language </i>
  * http://members.unine.ch/jacques.savoy/Papers/BUIR.pdf
- * <p>
  */
 public final class BulgarianAnalyzer extends StopwordAnalyzerBase {
 

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/package-info.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/package-info.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/package-info.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/package-info.java Sat Feb  7 10:10:34 2015
@@ -17,7 +17,6 @@
 
 /**
  * Normalization of text before the tokenizer.
- * </p>
  * <p>
  *   CharFilters are chainable filters that normalize text before tokenization 
  *   and provide mappings between normalized text offsets and the corresponding 

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java Sat Feb  7 10:10:34 2015
@@ -38,7 +38,7 @@ public final class CJKAnalyzer extends S
 
   /**
    * File containing default CJK stopwords.
-   * <p/>
+   * <p>
    * Currently it contains some common English words that are not usually
    * useful for searching and some double-byte interpunctions.
    */

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/package-info.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/package-info.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/package-info.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/package-info.java Sat Feb  7 10:10:34 2015
@@ -32,6 +32,5 @@
  *  <li>CJKAnalyzer: 我是-是中-中国-国人</li>
  *  <li>SmartChineseAnalyzer: 我-是-中国-人</li>
  * </ol>
- * </p>
  */
 package org.apache.lucene.analysis.cjk;

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java Sat Feb  7 10:10:34 2015
@@ -27,7 +27,6 @@ import org.apache.lucene.analysis.util.C
  * "Donaudampfschiff" becomes Donau, dampf, schiff so that you can find
  * "Donaudampfschiff" even when you only enter "schiff".
  *  It uses a brute-force algorithm to achieve this.
- * <p>
  */
 public class DictionaryCompoundWordTokenFilter extends CompoundWordTokenFilterBase {
 

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java Sat Feb  7 10:10:34 2015
@@ -48,7 +48,7 @@ import org.xml.sax.InputSource;
  *  <li><code>onlyLongestMatch</code> (optional): if true, adds only the longest matching subword 
  *    to the stream. defaults to false.
  * </ul>
- * <p>
+ * <br>
  * <pre class="prettyprint">
  * &lt;fieldType name="text_hyphncomp" class="solr.TextField" positionIncrementGap="100"&gt;
  *   &lt;analyzer&gt;

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java Sat Feb  7 10:10:34 2015
@@ -92,7 +92,7 @@
  * ). The files you need are in the subfolder
  * <i>offo-hyphenation/hyph/</i>
  * .
- * <br />
+ * <br>
  * Credits for the hyphenation code go to the
  * <a href="http://xmlgraphics.apache.org/fop/">Apache FOP project</a>
  * .

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilter.java Sat Feb  7 10:10:34 2015
@@ -29,14 +29,12 @@ import org.apache.lucene.analysis.util.S
  * of the <a href="http://snowball.tartarus.org/algorithms/german2/stemmer.html">
  * German2 snowball algorithm</a>.
  * It allows for the fact that ä, ö and ü are sometimes written as ae, oe and ue.
- * <p>
  * <ul>
  *   <li> 'ß' is replaced by 'ss'
  *   <li> 'ä', 'ö', 'ü' are replaced by 'a', 'o', 'u', respectively.
  *   <li> 'ae' and 'oe' are replaced by 'a', and 'o', respectively.
  *   <li> 'ue' is replaced by 'u', when not following a vowel or q.
  * </ul>
- * <p>
  * This is useful if you want this normalization without using
  * the German2 stemmer, or perhaps no stemming at all.
  */

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilter.java Sat Feb  7 10:10:34 2015
@@ -25,12 +25,12 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
 
 /** A high-performance kstem filter for english.
- * <p/>
+ * <p>
  * See <a href="http://ciir.cs.umass.edu/pubfiles/ir-35.pdf">
  * "Viewing Morphology as an Inference Process"</a>
  * (Krovetz, R., Proceedings of the Sixteenth Annual International ACM SIGIR
  * Conference on Research and Development in Information Retrieval, 191-203, 1993).
- * <p/>
+ * <p>
  * All terms must already be lowercased for this filter to work correctly.
  *
  * <p>

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java Sat Feb  7 10:10:34 2015
@@ -33,7 +33,7 @@ import org.apache.lucene.analysis.tokena
     Analyzer class that sets up the TokenStream chain as you want it.
     To use this with LowerCaseTokenizer, for example, you'd write an
     analyzer like this:
-    <P>
+    <br>
     <PRE class="prettyprint">
     class MyAnalyzer extends Analyzer {
       {@literal @Override}

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilter.java Sat Feb  7 10:10:34 2015
@@ -28,7 +28,7 @@ import org.apache.lucene.analysis.util.C
 /** 
  * A filter to apply normal capitalization rules to Tokens.  It will make the first letter
  * capital and the rest lower case.
- * <p/>
+ * <p>
  * This filter is particularly useful to build nice looking facet parameters.  This filter
  * is not appropriate if you intend to use a prefix query.
  */

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java Sat Feb  7 10:10:34 2015
@@ -29,19 +29,21 @@ import java.util.Set;
 
 /**
  * Factory for {@link CapitalizationFilter}.
- * <p/>
- * The factory takes parameters:<br/>
- * "onlyFirstWord" - should each word be capitalized or all of the words?<br/>
- * "keep" - a keep word list.  Each word that should be kept separated by whitespace.<br/>
- * "keepIgnoreCase - true or false.  If true, the keep list will be considered case-insensitive.<br/>
- * "forceFirstLetter" - Force the first letter to be capitalized even if it is in the keep list<br/>
- * "okPrefix" - do not change word capitalization if a word begins with something in this list.
+ * <p>
+ * The factory takes parameters:
+ * <ul>
+ * <li> "onlyFirstWord" - should each word be capitalized or all of the words?
+ * <li> "keep" - a keep word list.  Each word that should be kept separated by whitespace.
+ * <li> "keepIgnoreCase - true or false.  If true, the keep list will be considered case-insensitive.
+ * <li> "forceFirstLetter" - Force the first letter to be capitalized even if it is in the keep list
+ * <li> "okPrefix" - do not change word capitalization if a word begins with something in this list.
  * for example if "McK" is on the okPrefix list, the word "McKinley" should not be changed to
- * "Mckinley"<br/>
- * "minWordLength" - how long the word needs to be to get capitalization applied.  If the
- * minWordLength is 3, "and" &gt; "And" but "or" stays "or"<br/>
- * "maxWordCount" - if the token contains more then maxWordCount words, the capitalization is
- * assumed to be correct.<br/>
+ * "Mckinley"
+ * <li> "minWordLength" - how long the word needs to be to get capitalization applied.  If the
+ * minWordLength is 3, "and" &gt; "And" but "or" stays "or"
+ * <li>"maxWordCount" - if the token contains more then maxWordCount words, the capitalization is
+ * assumed to be correct.
+ * </ul>
  *
  * <pre class="prettyprint">
  * &lt;fieldType name="text_cptlztn" class="solr.TextField" positionIncrementGap="100"&gt;

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAndSuffixAwareTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAndSuffixAwareTokenFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAndSuffixAwareTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAndSuffixAwareTokenFilter.java Sat Feb  7 10:10:34 2015
@@ -24,7 +24,7 @@ import java.io.IOException;
 
 /**
  * Links two {@link PrefixAwareTokenFilter}.
- * <p/>
+ * <p>
  * <b>NOTE:</b> This filter might not behave correctly if used with custom Attributes, i.e. Attributes other than
  * the ones located in org.apache.lucene.analysis.tokenattributes. 
  */

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAwareTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAwareTokenFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAwareTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAwareTokenFilter.java Sat Feb  7 10:10:34 2015
@@ -35,7 +35,7 @@ import java.io.IOException;
  * to be used when updating the token values in the second stream based on that token.
  *
  * The default implementation adds last prefix token end offset to the suffix token start and end offsets.
- * <p/>
+ * <p>
  * <b>NOTE:</b> This filter might not behave correctly if used with custom Attributes, i.e. Attributes other than
  * the ones located in org.apache.lucene.analysis.tokenattributes. 
  */

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.java Sat Feb  7 10:10:34 2015
@@ -27,25 +27,24 @@ import java.io.IOException;
 /**
  * This filter folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o.
  * It also discriminate against use of double vowels aa, ae, ao, oe and oo, leaving just the first one.
- * <p/>
+ * <p>
  * It's a semantically more destructive solution than {@link ScandinavianNormalizationFilter} but
  * can in addition help with matching raksmorgas as räksmörgås.
- * <p/>
+ * <p>
  * blåbærsyltetøj == blåbärsyltetöj == blaabaarsyltetoej == blabarsyltetoj
  * räksmörgås == ræksmørgås == ræksmörgaos == raeksmoergaas == raksmorgas
- * <p/>
+ * <p>
  * Background:
  * Swedish åäö are in fact the same letters as Norwegian and Danish åæø and thus interchangeable
  * when used between these languages. They are however folded differently when people type
  * them on a keyboard lacking these characters.
- * <p/>
+ * <p>
  * In that situation almost all Swedish people use a, a, o instead of å, ä, ö.
- * <p/>
+ * <p>
  * Norwegians and Danes on the other hand usually type aa, ae and oe instead of å, æ and ø.
  * Some do however use a, a, o, oo, ao and sometimes permutations of everything above.
- * <p/>
+ * <p>
  * This filter solves that mismatch problem, but might also cause new.
- * <p/>
  * @see ScandinavianNormalizationFilter
  */
 public final class ScandinavianFoldingFilter extends TokenFilter {

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.java Sat Feb  7 10:10:34 2015
@@ -27,14 +27,13 @@ import java.io.IOException;
 /**
  * This filter normalize use of the interchangeable Scandinavian characters æÆäÄöÖøØ
  * and folded variants (aa, ao, ae, oe and oo) by transforming them to åÅæÆøØ.
- * <p/>
+ * <p>
  * It's a semantically less destructive solution than {@link ScandinavianFoldingFilter},
  * most useful when a person with a Norwegian or Danish keyboard queries a Swedish index
  * and vice versa. This filter does <b>not</b>  the common Swedish folds of å and ä to a nor ö to o.
- * <p/>
+ * <p>
  * blåbærsyltetøj == blåbärsyltetöj == blaabaarsyltetoej but not blabarsyltetoj
  * räksmörgås == ræksmørgås == ræksmörgaos == raeksmoergaas but not raksmorgas
- * <p/>
  * @see ScandinavianFoldingFilter
  */
 public final class ScandinavianNormalizationFilter extends TokenFilter {

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java Sat Feb  7 10:10:34 2015
@@ -96,42 +96,42 @@ public final class WordDelimiterFilter e
 
   /**
    * Causes parts of words to be generated:
-   * <p/>
+   * <p>
    * "PowerShot" =&gt; "Power" "Shot"
    */
   public static final int GENERATE_WORD_PARTS = 1;
 
   /**
    * Causes number subwords to be generated:
-   * <p/>
+   * <p>
    * "500-42" =&gt; "500" "42"
    */
   public static final int GENERATE_NUMBER_PARTS = 2;
 
   /**
    * Causes maximum runs of word parts to be catenated:
-   * <p/>
+   * <p>
    * "wi-fi" =&gt; "wifi"
    */
   public static final int CATENATE_WORDS = 4;
 
   /**
    * Causes maximum runs of word parts to be catenated:
-   * <p/>
+   * <p>
    * "wi-fi" =&gt; "wifi"
    */
   public static final int CATENATE_NUMBERS = 8;
 
   /**
    * Causes all subword parts to be catenated:
-   * <p/>
+   * <p>
    * "wi-fi-4000" =&gt; "wifi4000"
    */
   public static final int CATENATE_ALL = 16;
 
   /**
    * Causes original words are preserved and added to the subword list (Defaults to false)
-   * <p/>
+   * <p>
    * "500-42" =&gt; "500" "42" "500-42"
    */
   public static final int PRESERVE_ORIGINAL = 32;
@@ -150,7 +150,7 @@ public final class WordDelimiterFilter e
 
   /**
    * Causes trailing "'s" to be removed for each subword
-   * <p/>
+   * <p>
    * "O'Neil's" =&gt; "O", "Neil"
    */
   public static final int STEM_ENGLISH_POSSESSIVE = 256;

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java Sat Feb  7 10:10:34 2015
@@ -31,7 +31,7 @@ import org.apache.lucene.analysis.util.C
  * Tokenizes the given token into n-grams of given size(s).
  * <p>
  * This {@link TokenFilter} create n-grams from the beginning edge of a input token.
- * <p><a name="match_version" />As of Lucene 4.4, this filter handles correctly
+ * <p><a name="match_version"></a>As of Lucene 4.4, this filter handles correctly
  * supplementary characters.
  */
 public final class EdgeNGramTokenFilter extends TokenFilter {

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java Sat Feb  7 10:10:34 2015
@@ -25,7 +25,7 @@ import org.apache.lucene.util.Version;
  * Tokenizes the input from an edge into n-grams of given size(s).
  * <p>
  * This {@link Tokenizer} create n-grams from the beginning edge of a input token.
- * <p><a name="match_version" />As of Lucene 4.4, this class supports
+ * <p><a name="match_version"></a>As of Lucene 4.4, this class supports
  * {@link #isTokenChar(int) pre-tokenization} and correctly handles
  * supplementary characters.
  */

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java Sat Feb  7 10:10:34 2015
@@ -39,7 +39,7 @@ import org.apache.lucene.util.AttributeF
  * <tr><th>Position length</th><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr>
  * <tr><th>Offsets</th><td>[0,2[</td><td>[0,3[</td><td>[1,3[</td><td>[1,4[</td><td>[2,4[</td><td>[2,5[</td><td>[3,5[</td></tr>
  * </table>
- * <a name="version"/>
+ * <a name="version"></a>
  * <p>This tokenizer changed a lot in Lucene 4.4 in order to:<ul>
  * <li>tokenize in a streaming fashion to support streams which are larger
  * than 1024 chars (limit of the previous version),

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupTokenFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupTokenFilter.java Sat Feb  7 10:10:34 2015
@@ -58,9 +58,9 @@ import org.apache.lucene.util.CharsRefBu
  * </p>
  * <p>
  * <code>
- *   "([A-Z]{2,})",                                 <br />
- *   "(?&lt;![A-Z])([A-Z][a-z]+)",                     <br />
- *   "(?:^|\\b|(?&lt;=[0-9_])|(?&lt;=[A-Z]{2}))([a-z]+)", <br />
+ *   "([A-Z]{2,})",                                 
+ *   "(?&lt;![A-Z])([A-Z][a-z]+)",                     
+ *   "(?:^|\\b|(?&lt;=[0-9_])|(?&lt;=[A-Z]{2}))([a-z]+)",
  *   "([0-9]+)"
  * </code>
  * </p>

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java Sat Feb  7 10:10:34 2015
@@ -30,7 +30,7 @@ import org.apache.lucene.analysis.charfi
  * The pattern match will be done in each "block" in char stream.
  * 
  * <p>
- * ex1) source="aa&nbsp;&nbsp;bb&nbsp;aa&nbsp;bb", pattern="(aa)\\s+(bb)" replacement="$1#$2"<br/>
+ * ex1) source="aa&nbsp;&nbsp;bb&nbsp;aa&nbsp;bb", pattern="(aa)\\s+(bb)" replacement="$1#$2"<br>
  * output="aa#bb&nbsp;aa#bb"
  * </p>
  * 
@@ -39,9 +39,9 @@ import org.apache.lucene.analysis.charfi
  * face a trouble.
  * 
  * <p>
- * ex2) source="aa123bb", pattern="(aa)\\d+(bb)" replacement="$1&nbsp;$2"<br/>
- * output="aa&nbsp;bb"<br/>
- * and you want to search bb and highlight it, you will get<br/>
+ * ex2) source="aa123bb", pattern="(aa)\\d+(bb)" replacement="$1&nbsp;$2"<br>
+ * output="aa&nbsp;bb"<br>
+ * and you want to search bb and highlight it, you will get<br>
  * highlight snippet="aa1&lt;em&gt;23bb&lt;/em&gt;"
  * </p>
  * 

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java Sat Feb  7 10:10:34 2015
@@ -30,7 +30,6 @@ import org.apache.lucene.util.AttributeF
 /**
  * This tokenizer uses regex pattern matching to construct distinct tokens
  * for the input stream.  It takes two arguments:  "pattern" and "group".
- * <p/>
  * <ul>
  * <li>"pattern" is the regular expression.</li>
  * <li>"group" says which group to extract into tokens.</li>
@@ -41,7 +40,7 @@ import org.apache.lucene.util.AttributeF
  * {@link String#split(java.lang.String)}
  * </p>
  * <p>
- * Using group &gt;= 0 selects the matching group as the token.  For example, if you have:<br/>
+ * Using group &gt;= 0 selects the matching group as the token.  For example, if you have:<br>
  * <pre>
  *  pattern = \'([^\']+)\'
  *  group = 0
@@ -49,7 +48,6 @@ import org.apache.lucene.util.AttributeF
  *</pre>
  * the output will be two tokens: 'bbb' and 'ccc' (including the ' marks).  With the same input
  * but using group=1, the output would be: bbb and ccc (no ' marks)
- * </p>
  * <p>NOTE: This Tokenizer does not output tokens that are of zero length.</p>
  *
  * @see Pattern

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java Sat Feb  7 10:10:34 2015
@@ -27,7 +27,7 @@ import org.apache.lucene.util.AttributeF
  * Factory for {@link PatternTokenizer}.
  * This tokenizer uses regex pattern matching to construct distinct tokens
  * for the input stream.  It takes two arguments:  "pattern" and "group".
- * <p/>
+ * <br>
  * <ul>
  * <li>"pattern" is the regular expression.</li>
  * <li>"group" says which group to extract into tokens.</li>
@@ -38,7 +38,7 @@ import org.apache.lucene.util.AttributeF
  * {@link String#split(java.lang.String)}
  * </p>
  * <p>
- * Using group &gt;= 0 selects the matching group as the token.  For example, if you have:<br/>
+ * Using group &gt;= 0 selects the matching group as the token.  For example, if you have:<br>
  * <pre>
  *  pattern = \'([^\']+)\'
  *  group = 0
@@ -46,7 +46,6 @@ import org.apache.lucene.util.AttributeF
  * </pre>
  * the output will be two tokens: 'bbb' and 'ccc' (including the ' marks).  With the same input
  * but using group=1, the output would be: bbb and ccc (no ' marks)
- * </p>
  * <p>NOTE: This Tokenizer does not output tokens that are of zero length.</p>
  *
  * <pre class="prettyprint">

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.java Sat Feb  7 10:10:34 2015
@@ -26,12 +26,12 @@ import org.apache.lucene.analysis.tokena
 
 /**
  * Characters before the delimiter are the "token", those after are the payload.
- * <p/>
+ * <p>
  * For example, if the delimiter is '|', then for the string "foo|bar", foo is the token
  * and "bar" is a payload.
- * <p/>
+ * <p>
  * Note, you can also include a {@link org.apache.lucene.analysis.payloads.PayloadEncoder} to convert the payload in an appropriate way (from characters to bytes).
- * <p/>
+ * <p>
  * Note make sure your Tokenizer doesn't split on the delimiter, or this won't work
  *
  * @see PayloadEncoder

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java Sat Feb  7 10:10:34 2015
@@ -20,8 +20,7 @@ import org.apache.lucene.util.BytesRef;
  */
 
 /**
- *  Encode a character array Float as a {@link BytesRef}.
- * <p/>
+ * Encode a character array Float as a {@link BytesRef}.
  * @see org.apache.lucene.analysis.payloads.PayloadHelper#encodeFloat(float, byte[], int)
  *
  **/

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java Sat Feb  7 10:10:34 2015
@@ -22,7 +22,7 @@ import org.apache.lucene.util.BytesRef;
 
 /**
  *  Encode a character array Integer as a {@link BytesRef}.
- * <p/>
+ * <p>
  * See {@link org.apache.lucene.analysis.payloads.PayloadHelper#encodeInt(int, byte[], int)}.
  *
  **/

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/PayloadEncoder.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/PayloadEncoder.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/PayloadEncoder.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/PayloadEncoder.java Sat Feb  7 10:10:34 2015
@@ -23,7 +23,7 @@ import org.apache.lucene.util.BytesRef;
 /**
  * Mainly for use with the DelimitedPayloadTokenFilter, converts char buffers to
  * {@link BytesRef}.
- * <p/>
+ * <p>
  * NOTE: This interface is subject to change 
  *
  **/

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java Sat Feb  7 10:10:34 2015
@@ -28,7 +28,6 @@ import org.apache.lucene.util.AttributeS
 /**
  * Attempts to parse the {@link CharTermAttribute#buffer()} as a Date using a {@link java.text.DateFormat}.
  * If the value is a Date, it will add it to the sink.
- * <p/> 
  *
  **/
 public class DateRecognizerSinkFilter extends TeeSinkTokenFilter.SinkFilter {

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java Sat Feb  7 10:10:34 2015
@@ -32,7 +32,7 @@ import org.apache.lucene.util.AttributeS
  * This TokenFilter provides the ability to set aside attribute states
  * that have already been analyzed.  This is useful in situations where multiple fields share
  * many common analysis steps and then go their separate ways.
- * <p/>
+ * <p>
  * It is also useful for doing things like entity extraction or proper noun analysis as
  * part of the analysis workflow and saving off those tokens for use in another field.
  *
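
As an illustration of the tee/sink pattern described above, combined with the DateRecognizerSinkFilter from the previous hunk, a rough sketch might look like this (the newSinkTokenStream(SinkFilter) call reflects the API as it stands at this revision; the date format and field handling are illustrative):

import java.io.StringReader;
import java.text.SimpleDateFormat;
import java.util.Locale;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.sinks.DateRecognizerSinkFilter;
import org.apache.lucene.analysis.sinks.TeeSinkTokenFilter;

public class TeeSinkSketch {
  public static void main(String[] args) throws Exception {
    WhitespaceTokenizer source = new WhitespaceTokenizer();
    source.setReader(new StringReader("released 02/07/2015 by lucene"));

    // The tee is consumed as the main stream; the sink only receives tokens
    // that the DateRecognizerSinkFilter successfully parses as dates.
    TeeSinkTokenFilter tee = new TeeSinkTokenFilter(source);
    TokenStream dateSink = tee.newSinkTokenStream(
        new DateRecognizerSinkFilter(new SimpleDateFormat("MM/dd/yyyy", Locale.ROOT)));

    // Index `tee` for the main field first, then `dateSink` for a date-only field.
  }
}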

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java Sat Feb  7 10:10:34 2015
@@ -36,7 +36,6 @@ import org.tartarus.snowball.SnowballPro
  *  <li>For the Turkish language, see {@link TurkishLowerCaseFilter}.
  *  <li>For other languages, see {@link LowerCaseFilter}.
  * </ul>
- * </p>
  *
  * <p>
  * Note: This filter is aware of the {@link KeywordAttribute}. To prevent

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java Sat Feb  7 10:10:34 2015
@@ -31,7 +31,6 @@ import org.apache.lucene.util.AttributeF
  * This class implements the Word Break rules from the
  * Unicode Text Segmentation algorithm, as specified in 
  * <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a>.
- * <p/>
  * <p>Many applications have specific tokenizer needs.  If this tokenizer does
  * not suit your application, please consider copying this source code
  * directory to your project and maintaining your own grammar-based tokenizer.
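
For readers following along, a bare-bones sketch of driving StandardTokenizer directly (assuming the no-arg constructor plus setReader() on trunk; the sample text is arbitrary):

import java.io.StringReader;

import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;

public class StandardTokenizerExample {
  public static void main(String[] args) throws Exception {
    StandardTokenizer tokenizer = new StandardTokenizer();
    tokenizer.setReader(new StringReader("UAX#29 tokenizes C++ and 3.14 differently"));

    CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
    TypeAttribute type = tokenizer.addAttribute(TypeAttribute.class);

    tokenizer.reset();
    while (tokenizer.incrementToken()) {
      // e.g. "3.14" typically comes back typed as <NUM>, plain words as <ALPHANUM>
      System.out.println(term.toString() + " : " + type.type());
    }
    tokenizer.end();
    tokenizer.close();
  }
}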

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java Sat Feb  7 10:10:34 2015
@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.tokena
  * This class implements Word Break rules from the Unicode Text Segmentation 
  * algorithm, as specified in 
  * <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a>. 
- * <p/>
+ * <p>
  * Tokens produced are of the following types:
  * <ul>
  *   <li>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</li>

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex Sat Feb  7 10:10:34 2015
@@ -23,7 +23,7 @@ import org.apache.lucene.analysis.tokena
  * This class implements Word Break rules from the Unicode Text Segmentation 
  * algorithm, as specified in 
  * <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a>. 
- * <p/>
+ * <p>
  * Tokens produced are of the following types:
  * <ul>
  *   <li>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</li>

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java Sat Feb  7 10:10:34 2015
@@ -31,7 +31,7 @@ import org.apache.lucene.util.AttributeF
  * algorithm, as specified in 
  * <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a> 
  * URLs and email addresses are also tokenized according to the relevant RFCs.
- * <p/>
+ * <p>
  * Tokens produced are of the following types:
  * <ul>
  *   <li>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</li>

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java Sat Feb  7 10:10:34 2015
@@ -26,7 +26,7 @@ import org.apache.lucene.analysis.tokena
  * algorithm, as specified in 
  * <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a> 
  * URLs and email addresses are also tokenized according to the relevant RFCs.
- * <p/>
+ * <p>
  * Tokens produced are of the following types:
  * <ul>
  *   <li>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</li>

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex Sat Feb  7 10:10:34 2015
@@ -24,7 +24,7 @@ import org.apache.lucene.analysis.tokena
  * algorithm, as specified in 
  * <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a> 
  * URLs and email addresses are also tokenized according to the relevant RFCs.
- * <p/>
+ * <p>
  * Tokens produced are of the following types:
  * <ul>
  *   <li>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</li>

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java Sat Feb  7 10:10:34 2015
@@ -58,7 +58,7 @@ import org.apache.lucene.util.fst.FST;
  * Then input <code>a b c d e</code> parses to <code>y b c
  * d</code>, ie the 2nd rule "wins" because it started
  * earliest and matched the most input tokens of other rules
- * starting at that point.</p>
+ * starting at that point.
  *
  * <p>A future improvement to this filter could allow
  * non-greedy parsing, such that the 3rd rule would win, and
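
A compact sketch of how a SynonymMap is built and handed to the filter, for anyone wanting to try the greedy-matching behaviour described above (single-word rules only; real multi-word entries must have their tokens joined with SynonymMap.WORD_SEPARATOR):

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.synonym.SynonymFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.util.CharsRef;

public class SynonymSketch {
  public static void main(String[] args) throws Exception {
    SynonymMap.Builder builder = new SynonymMap.Builder(true); // dedup duplicate outputs
    builder.add(new CharsRef("automobile"), new CharsRef("car"), true);  // automobile => car, keep original
    builder.add(new CharsRef("colour"), new CharsRef("color"), false);   // colour => color, replace
    SynonymMap map = builder.build();

    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader("automobile colour chart"));
    TokenStream stream = new SynonymFilter(tokenizer, map, true); // ignoreCase = true
    // consume the stream as usual; synonym outputs are injected at the matched positions
  }
}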

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java Sat Feb  7 10:10:34 2015
@@ -73,7 +73,6 @@ import org.apache.lucene.util.Version;
  *   <li><code>boolean expand</code> - true if conflation groups should be expanded, false if they are one-directional</li>
  *   <li><code>{@link Analyzer} analyzer</code> - an analyzer used for each raw synonym</li>
  * </ul>
- * </p>
  * @see SolrSynonymParser SolrSynonymParser: default format
  */
 public class SynonymFilterFactory extends TokenFilterFactory implements ResourceLoaderAware {

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java Sat Feb  7 10:10:34 2015
@@ -130,7 +130,6 @@ public class WordlistLoader {
    * <li>The comment character is the vertical line (&#124;).
    * <li>Lines may contain trailing comments.
    * </ul>
-   * </p>
    * 
    * @param reader Reader containing a Snowball stopword list
    * @param result the {@link CharArraySet} to fill with the readers words
@@ -164,7 +163,6 @@ public class WordlistLoader {
    * <li>The comment character is the vertical line (&#124;).
    * <li>Lines may contain trailing comments.
    * </ul>
-   * </p>
    * 
    * @param reader Reader containing a Snowball stopword list
    * @return A {@link CharArraySet} with the reader's words
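
A small usage sketch of the Snowball-format loader described above (the file name is made up; the Version-less getSnowballWordSet(Reader) overload documented in this hunk is assumed):

import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.WordlistLoader;

public class SnowballStopwordsExample {
  public static void main(String[] args) throws Exception {
    // A Snowball-format stopword list: multiple words per line, "|" starts a comment.
    try (Reader reader = Files.newBufferedReader(Paths.get("english_stop.txt"), StandardCharsets.UTF_8)) {
      CharArraySet stopwords = WordlistLoader.getSnowballWordSet(reader);
      System.out.println("loaded " + stopwords.size() + " stopwords");
    }
  }
}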

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java Sat Feb  7 10:10:34 2015
@@ -34,8 +34,6 @@ import java.util.*;
 /**
  * Extension of StandardTokenizer that is aware of Wikipedia syntax.  It is based off of the
  * Wikipedia tutorial available at http://en.wikipedia.org/wiki/Wikipedia:Tutorial, but it may not be complete.
- * <p/>
- * <p/>
  * @lucene.experimental
  */
 public final class WikipediaTokenizer extends Tokenizer {

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java Sat Feb  7 10:10:34 2015
@@ -1,5 +1,22 @@
 package org.apache.lucene.analysis.standard;
 
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.document.Document;
@@ -20,23 +37,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Random;
 
-
-/**
- * Copyright 2004 The Apache Software Foundation
- * <p/>
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
+/** tests for classicanalyzer */
 public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
 
   private Analyzer  a = new ClassicAnalyzer();

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java Sat Feb  7 10:10:34 2015
@@ -28,14 +28,14 @@ import org.apache.lucene.util.Version;
  * Base class for testing tokenstream factories. 
  * <p>
  * Example usage:
- * <code><pre>
+ * <pre class="prettyprint">
  *   Reader reader = new StringReader("Some Text to Analyze");
  *   reader = charFilterFactory("htmlstrip").create(reader);
  *   TokenStream stream = tokenizerFactory("standard").create(reader);
  *   stream = tokenFilterFactory("lowercase").create(stream);
  *   stream = tokenFilterFactory("asciifolding").create(stream);
  *   assertTokenStreamContents(stream, new String[] { "some", "text", "to", "analyze" });
- * </pre></code>
+ * </pre>
  */
 // TODO: this has to be here, since the abstract factories are not in lucene-core,
 // so test-framework doesnt know about them...

Modified: lucene/dev/trunk/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java Sat Feb  7 10:10:34 2015
@@ -39,7 +39,7 @@ import java.util.regex.Pattern;
  * Generates a file containing JFlex macros to accept valid ASCII TLDs 
  * (top level domains), for inclusion in JFlex grammars that can accept 
  * domain names.
- * <p/> 
+ * <p> 
  * The IANA Root Zone Database is queried via HTTP from URL cmdline arg #0, the
  * response is parsed, and the results are written out to a file containing 
  * a JFlex macro that will accept all valid ASCII-only TLDs, including punycode 

Modified: lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java Sat Feb  7 10:10:34 2015
@@ -46,11 +46,10 @@ import com.ibm.icu.text.UnicodeSet;
  * <li>Conversion from Fullwidth to Halfwidth forms.
  * <li>Script conversions, for example Serbian Cyrillic to Latin
  * </ul>
- * </p>
  * <p>
  * Example usage: <blockquote>stream = new ICUTransformFilter(stream,
  * Transliterator.getInstance("Traditional-Simplified"));</blockquote>
- * </p>
+ * <br>
  * For more details, see the <a
  * href="http://userguide.icu-project.org/transforms/general">ICU User
  * Guide</a>.
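
Expanding the javadoc's one-line example above into a runnable sketch (requires icu4j on the classpath; the sample text and the transform id are illustrative):

import java.io.StringReader;

import com.ibm.icu.text.Transliterator;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.icu.ICUTransformFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ICUTransformExample {
  public static void main(String[] args) throws Exception {
    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader("簡化字"));

    // Traditional-to-Simplified script conversion, as in the javadoc example above.
    TokenStream stream = new ICUTransformFilter(tokenizer,
        Transliterator.getInstance("Traditional-Simplified"));

    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
      System.out.println(term.toString());
    }
    stream.end();
    stream.close();
  }
}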

Modified: lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java (original)
+++ lucene/dev/trunk/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java Sat Feb  7 10:10:34 2015
@@ -42,8 +42,7 @@ import com.ibm.icu.text.RuleBasedBreakIt
  * Words are broken across script boundaries, then segmented according to
  * the BreakIterator and typing provided by the {@link DefaultICUTokenizerConfig}.
  *
- * <p/>
- *
+ * <p>
  * To use the default set of per-script rules:
  *
  * <pre class="prettyprint" >
@@ -53,13 +52,13 @@ import com.ibm.icu.text.RuleBasedBreakIt
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
  *
- * <p/>
- *
+ * <p>
  * You can customize this tokenizer's behavior by specifying per-script rule files,
  * which are compiled by the ICU RuleBasedBreakIterator.  See the
  * <a href="http://userguide.icu-project.org/boundaryanalysis#TOC-RBBI-Rules"
  * >ICU RuleBasedBreakIterator syntax reference</a>.
  *
+ * <p>
  * To add per-script rules, add a "rulefiles" argument, which should contain a
  * comma-separated list of <tt>code:rulefile</tt> pairs in the following format:
  * <a href="http://unicode.org/iso15924/iso15924-codes.html"

Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java Sat Feb  7 10:10:34 2015
@@ -28,11 +28,11 @@ import java.io.Reader;
  * <p>
  * Sequences of iteration marks are supported.  In case an illegal sequence of iteration
  * marks is encountered, the implementation emits the illegal source character as-is
- * without considering its script.  For example, with input "&#x003f;&#x309d;", we get
- * "&#x003f;&#x003f;" even though "&#x003f;" isn't hiragana.
+ * without considering its script.  For example, with input "?ゝ", we get
+ * "??" even though the question mark isn't hiragana.
  * </p>
  * <p>
- * Note that a full stop punctuation character "&#x3002;" (U+3002) can not be iterated
+ * Note that a full stop punctuation character "。" (U+3002) can not be iterated
  * (see below). Iteration marks themselves can be emitted in case they are illegal,
  * i.e. if they go back past the beginning of the character stream.
  * </p>
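
A quick sketch of the char filter described above, applied directly to a Reader (the single-argument constructor and the 時々 expansion are assumptions based on the class's stated purpose, not taken from this commit):

import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.ja.JapaneseIterationMarkCharFilter;

public class IterationMarkSketch {
  public static void main(String[] args) throws Exception {
    // "時々" uses the kanji iteration mark "々"; the char filter should expand it to "時時".
    Reader reader = new JapaneseIterationMarkCharFilter(new StringReader("時々"));
    StringBuilder sb = new StringBuilder();
    int c;
    while ((c = reader.read()) != -1) {
      sb.append((char) c);
    }
    System.out.println(sb); // expected: 時時
  }
}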

Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java Sat Feb  7 10:10:34 2015
@@ -24,7 +24,7 @@ import org.apache.lucene.analysis.util.T
 
 /**
  * Factory for {@link JapaneseNumberFilter}.
- * <p>
+ * <br>
  * <pre class="prettyprint">
  * &lt;fieldType name="text_ja" class="solr.TextField"&gt;
  *   &lt;analyzer&gt;

Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilter.java Sat Feb  7 10:10:34 2015
@@ -54,7 +54,6 @@ public class TestJapaneseKatakanaStemFil
    *   <li>center</li>
    * </ul>
    * Note that we remove a long sound in the case of "coffee" that is required.
-   * </p>
    */
   public void testStemVariants() throws IOException {
     assertAnalyzesTo(analyzer, "コピー コーヒー タクシー パーティー パーティ センター",

Modified: lucene/dev/trunk/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java (original)
+++ lucene/dev/trunk/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java Sat Feb  7 10:10:34 2015
@@ -40,9 +40,8 @@ import org.apache.lucene.analysis.util.T
 /**
  * Factory for {@link PhoneticFilter}.
  * 
- * Create tokens based on phonetic encoders from <a href="
- * http://commons.apache.org/codec/api-release/org/apache/commons/codec/language/package-summary.html
- * ">Apache Commons Codec</a>.
+ * Create tokens based on phonetic encoders from 
+ * <a href="http://commons.apache.org/codec/api-release/org/apache/commons/codec/language/package-summary.html">Apache Commons Codec</a>.
  * <p>
  * This takes one required argument, "encoder", and the rest are optional:
  * <dl>

Modified: lucene/dev/trunk/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/BaseUIMATokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/BaseUIMATokenizer.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/BaseUIMATokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/BaseUIMATokenizer.java Sat Feb  7 10:10:34 2015
@@ -54,7 +54,7 @@ public abstract class BaseUIMATokenizer
 
   /**
    * analyzes the tokenizer input using the given analysis engine
-   * <p/>
+   * <p>
    * {@link #cas} will be filled with  extracted metadata (UIMA annotations, feature structures)
    *
    * @throws IOException If there is a low-level I/O error.

Modified: lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java (original)
+++ lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java Sat Feb  7 10:10:34 2015
@@ -38,7 +38,7 @@ import java.util.List;
  * File can be specified as a absolute, relative or resource.
  * Two properties can be set:
  * file.query.maker.file=&lt;Full path to file containing queries&gt;
- * <br/>
+ * <br>
  * file.query.maker.default.field=&lt;Name of default field - Default value is "body"&gt;
  *
  * Example:

Modified: lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java (original)
+++ lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java Sat Feb  7 10:10:34 2015
@@ -63,7 +63,7 @@ import java.util.regex.Pattern;
  * Each component analysis factory may specify <tt>luceneMatchVersion</tt> (defaults to
  * {@link Version#LATEST}) and any of the args understood by the specified
  * *Factory class, in the above-describe param format.
- * <p/>
+ * <p>
  * Example:
  * <pre>
  *     -AnalyzerFactory(name:'strip html, fold to ascii, whitespace tokenize, max 10k tokens',
@@ -75,7 +75,7 @@ import java.util.regex.Pattern;
  *     [...]
  *     -NewAnalyzer('strip html, fold to ascii, whitespace tokenize, max 10k tokens')
  * </pre>
- * <p/>
+ * <p>
  * AnalyzerFactory will direct analysis component factories to look for resources
  * under the directory specified in the "work.dir" property.
  */

Modified: lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java (original)
+++ lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java Sat Feb  7 10:10:34 2015
@@ -96,11 +96,11 @@ public class NewAnalyzerTask extends Per
   /**
    * Set the params (analyzerName only),  Comma-separate list of Analyzer class names.  If the Analyzer lives in
    * org.apache.lucene.analysis, the name can be shortened by dropping the o.a.l.a part of the Fully Qualified Class Name.
-   * <p/>
+   * <p>
    * Analyzer names may also refer to previously defined AnalyzerFactory's.
-   * <p/>
+   * <p>
    * Example Declaration: {"NewAnalyzer" NewAnalyzer(WhitespaceAnalyzer, SimpleAnalyzer, StopAnalyzer, standard.StandardAnalyzer) &gt;
-   * <p/>
+   * <p>
    * Example AnalyzerFactory usage:
    * <pre>
    * -AnalyzerFactory(name:'whitespace tokenized',WhitespaceTokenizer)

Modified: lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewCollationAnalyzerTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewCollationAnalyzerTask.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewCollationAnalyzerTask.java (original)
+++ lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewCollationAnalyzerTask.java Sat Feb  7 10:10:34 2015
@@ -27,12 +27,11 @@ import org.apache.lucene.benchmark.byTas
 
 /**
  * Task to support benchmarking collation.
- * <p>
+ * <br>
  * <ul>
  *  <li> <code>NewCollationAnalyzer</code> with the default jdk impl
  *  <li> <code>NewCollationAnalyzer(impl:icu)</code> specify an impl (jdk,icu)
  * </ul>
- * </p>
  */
 public class NewCollationAnalyzerTask extends PerfTask {
   /**

Modified: lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewLocaleTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewLocaleTask.java?rev=1658040&r1=1658039&r2=1658040&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewLocaleTask.java (original)
+++ lucene/dev/trunk/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewLocaleTask.java Sat Feb  7 10:10:34 2015
@@ -33,7 +33,6 @@ import org.apache.lucene.benchmark.byTas
  *  <li><code>ROOT</code>: The root (language-agnostic) Locale
  *  <li>&lt;empty string&gt;: Erase the Locale (null)
  * </ul>
- * </p>
  */
 public class NewLocaleTask extends PerfTask {
   private String language;