You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ro...@apache.org on 2014/12/01 18:25:47 UTC
svn commit: r1642718 [2/12] - in /lucene/dev/branches/lucene2878: ./
dev-tools/ dev-tools/eclipse/dot.settings/ dev-tools/idea/.idea/
dev-tools/idea/lucene/benchmark/src/ dev-tools/idea/lucene/highlighter/
dev-tools/maven/ dev-tools/maven/solr/webapp/ ...
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java Mon Dec 1 17:25:39 2014
@@ -66,7 +66,7 @@ import java.util.Arrays;
* <ul>
* <li><code>"PowerShot"</code> →
* <code>0:"Power", 1:"Shot" 1:"PowerShot"</code></li>
- * <li><code>"A's+B's&C's"</code> -gt; <code>0:"A", 1:"B", 2:"C", 2:"ABC"</code>
+ * <li><code>"A's+B's&C's"</code> > <code>0:"A", 1:"B", 2:"C", 2:"ABC"</code>
* </li>
* <li><code>"Super-Duper-XL500-42-AutoCoder!"</code> →
* <code>0:"Super", 1:"Duper", 2:"XL", 2:"SuperDuperXL", 3:"500" 4:"42", 5:"Auto", 6:"Coder", 6:"AutoCoder"</code>
@@ -97,42 +97,42 @@ public final class WordDelimiterFilter e
/**
* Causes parts of words to be generated:
* <p/>
- * "PowerShot" => "Power" "Shot"
+ * "PowerShot" => "Power" "Shot"
*/
public static final int GENERATE_WORD_PARTS = 1;
/**
* Causes number subwords to be generated:
* <p/>
- * "500-42" => "500" "42"
+ * "500-42" => "500" "42"
*/
public static final int GENERATE_NUMBER_PARTS = 2;
/**
* Causes maximum runs of word parts to be catenated:
* <p/>
- * "wi-fi" => "wifi"
+ * "wi-fi" => "wifi"
*/
public static final int CATENATE_WORDS = 4;
/**
* Causes maximum runs of word parts to be catenated:
* <p/>
- * "wi-fi" => "wifi"
+ * "wi-fi" => "wifi"
*/
public static final int CATENATE_NUMBERS = 8;
/**
* Causes all subword parts to be catenated:
* <p/>
- * "wi-fi-4000" => "wifi4000"
+ * "wi-fi-4000" => "wifi4000"
*/
public static final int CATENATE_ALL = 16;
/**
* Causes original words are preserved and added to the subword list (Defaults to false)
* <p/>
- * "500-42" => "500" "42" "500-42"
+ * "500-42" => "500" "42" "500-42"
*/
public static final int PRESERVE_ORIGINAL = 32;
@@ -151,7 +151,7 @@ public final class WordDelimiterFilter e
/**
* Causes trailing "'s" to be removed for each subword
* <p/>
- * "O'Neil's" => "O", "Neil"
+ * "O'Neil's" => "O", "Neil"
*/
public static final int STEM_ENGLISH_POSSESSIVE = 256;
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java Mon Dec 1 17:25:39 2014
@@ -61,7 +61,7 @@ public final class WordDelimiterIterator
/**
* If true, causes trailing "'s" to be removed for each subword. (Defaults to true)
* <p/>
- * "O'Neil's" => "O", "Neil"
+ * "O'Neil's" => "O", "Neil"
*/
final boolean stemEnglishPossessive;
@@ -99,7 +99,7 @@ public final class WordDelimiterIterator
* @param charTypeTable table containing character types
* @param splitOnCaseChange if true, causes "PowerShot" to be two tokens; ("Power-Shot" remains two parts regards)
* @param splitOnNumerics if true, causes "j2se" to be three tokens; "j" "2" "se"
- * @param stemEnglishPossessive if true, causes trailing "'s" to be removed for each subword: "O'Neil's" => "O", "Neil"
+ * @param stemEnglishPossessive if true, causes trailing "'s" to be removed for each subword: "O'Neil's" => "O", "Neil"
*/
WordDelimiterIterator(byte[] charTypeTable, boolean splitOnCaseChange, boolean splitOnNumerics, boolean stemEnglishPossessive) {
this.charTypeTable = charTypeTable;
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java Mon Dec 1 17:25:39 2014
@@ -33,7 +33,7 @@ import org.apache.lucene.util.AttributeF
* that characters between startOffset and endOffset in the original stream are
* the same as the term chars.
* <p>For example, "abcde" would be tokenized as (minGram=2, maxGram=3):
- * <table>
+ * <table summary="ngram tokens example">
* <tr><th>Term</th><td>ab</td><td>abc</td><td>bc</td><td>bcd</td><td>cd</td><td>cde</td><td>de</td></tr>
* <tr><th>Position increment</th><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr>
* <tr><th>Position length</th><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr>
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizer.java Mon Dec 1 17:25:39 2014
@@ -41,7 +41,7 @@ import org.apache.lucene.util.AttributeF
* {@link String#split(java.lang.String)}
* </p>
* <p>
- * Using group >= 0 selects the matching group as the token. For example, if you have:<br/>
+ * Using group >= 0 selects the matching group as the token. For example, if you have:<br/>
* <pre>
* pattern = \'([^\']+)\'
* group = 0
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java Mon Dec 1 17:25:39 2014
@@ -38,7 +38,7 @@ import org.apache.lucene.util.AttributeF
* {@link String#split(java.lang.String)}
* </p>
* <p>
- * Using group >= 0 selects the matching group as the token. For example, if you have:<br/>
+ * Using group >= 0 selects the matching group as the token. For example, if you have:<br/>
* <pre>
* pattern = \'([^\']+)\'
* group = 0
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilter.java Mon Dec 1 17:25:39 2014
@@ -24,10 +24,10 @@ import org.apache.lucene.analysis.tokena
import java.io.IOException;
/**
- * Reverse token string, for example "country" => "yrtnuoc".
+ * Reverse token string, for example "country" => "yrtnuoc".
* <p>
* If <code>marker</code> is supplied, then tokens will be also prepended by
- * that character. For example, with a marker of \u0001, "country" =>
+ * that character. For example, with a marker of \u0001, "country" =>
* "\u0001yrtnuoc". This is useful when implementing efficient leading
* wildcards search.
*/
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java Mon Dec 1 17:25:39 2014
@@ -39,7 +39,7 @@ import org.apache.lucene.util.AttributeS
* might be tokenized into shingles "please divide", "divide this",
* "this sentence", "sentence into", and "into shingles".
*
- * <p>This filter handles position increments > 1 by inserting filler tokens
+ * <p>This filter handles position increments > 1 by inserting filler tokens
* (tokens with termtext "_"). It does not handle a position increment of 0.
*/
public final class ShingleFilter extends TokenFilter {
@@ -356,7 +356,7 @@ public final class ShingleFilter extends
/**
* <p>Get the next token from the input stream.
- * <p>If the next token has <code>positionIncrement > 1</code>,
+ * <p>If the next token has <code>positionIncrement > 1</code>,
* <code>positionIncrement - 1</code> {@link #fillerToken}s are
* inserted first.
* @param target Where to put the new token; if null, a new instance is created.
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java Mon Dec 1 17:25:39 2014
@@ -32,11 +32,11 @@ import org.apache.lucene.util.CharsRefBu
* Parser for the Solr synonyms format.
* <ol>
* <li> Blank lines and lines starting with '#' are comments.
- * <li> Explicit mappings match any token sequence on the LHS of "=>"
+ * <li> Explicit mappings match any token sequence on the LHS of "=>"
* and replace with all alternatives on the RHS. These types of mappings
* ignore the expand parameter in the constructor.
* Example:
- * <blockquote>i-pod, i pod => ipod</blockquote>
+ * <blockquote>i-pod, i pod => ipod</blockquote>
* <li> Equivalent synonyms may be separated with commas and give
* no explicit mapping. In this case the mapping behavior will
* be taken from the expand parameter in the constructor. This allows
@@ -47,10 +47,10 @@ import org.apache.lucene.util.CharsRefBu
* <li> Multiple synonym mapping entries are merged.
* Example:
* <blockquote>
- * foo => foo bar<br>
- * foo => baz<br><br>
+ * foo => foo bar<br>
+ * foo => baz<br><br>
* is equivalent to<br><br>
- * foo => foo bar, baz
+ * foo => foo bar, baz
* </blockquote>
* </ol>
* @lucene.experimental
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java Mon Dec 1 17:25:39 2014
@@ -50,9 +50,9 @@ import org.apache.lucene.util.fst.FST;
* For example if you have these rules:
*
* <pre>
- * a -> x
- * a b -> y
- * b c d -> z
+ * a -> x
+ * a b -> y
+ * b c d -> z
* </pre>
*
* Then input <code>a b c d e</code> parses to <code>y b c
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java Mon Dec 1 17:25:39 2014
@@ -192,7 +192,7 @@ public class SynonymMap {
}
/**
- * Add a phrase->phrase synonym mapping.
+ * Add a phrase->phrase synonym mapping.
* Phrases are character sequences where words are
* separated with character zero (U+0000). Empty words
* (two U+0000s in a row) are not allowed in the input nor
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java Mon Dec 1 17:25:39 2014
@@ -84,7 +84,7 @@ public class CommonGramsFilterTest exten
* unigram or a bigram It also will not return a token for the final position
* if the final word is already in the preceding bigram Example:(three
* tokens/positions in)
- * "foo bar the"=>"foo:1|bar:2,bar-the:2|the:3=> "foo" "bar-the" (2 tokens
+ * "foo bar the"=>"foo:1|bar:2,bar-the:2|the:3=> "foo" "bar-the" (2 tokens
* out)
*
*/
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/Test64kAffixes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/Test64kAffixes.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/Test64kAffixes.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/Test64kAffixes.java Mon Dec 1 17:25:39 2014
@@ -27,7 +27,7 @@ import java.util.List;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.LuceneTestCase;
-/** Tests that > 64k affixes actually works and doesnt overflow some internal int */
+/** Tests that > 64k affixes actually works and doesnt overflow some internal int */
public class Test64kAffixes extends LuceneTestCase {
public void test() throws Exception {
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemmer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemmer.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemmer.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemmer.java Mon Dec 1 17:25:39 2014
@@ -55,7 +55,7 @@ public class TestLatvianStemmer extends
}
/**
- * decl II nouns with (s,t) -> š and (d,z) -> ž
+ * decl II nouns with (s,t) -> š and (d,z) -> ž
* palatalization will generally conflate to two stems
* due to the ambiguity (plural and singular).
*/
@@ -151,7 +151,7 @@ public class TestLatvianStemmer extends
}
/**
- * Genitive plural forms with (s,t) -> š and (d,z) -> ž
+ * Genitive plural forms with (s,t) -> š and (d,z) -> ž
* will not conflate due to ambiguity.
*/
public void testNouns5() throws IOException {
@@ -240,7 +240,7 @@ public class TestLatvianStemmer extends
/**
* Note: we intentionally don't handle the ambiguous
- * (s,t) -> š and (d,z) -> ž
+ * (s,t) -> š and (d,z) -> ž
*/
public void testPalatalization() throws IOException {
checkOneTerm(a, "krāsns", "krāsn"); // nom. sing.
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java Mon Dec 1 17:25:39 2014
@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.misce
*/
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Token;
@@ -31,9 +32,9 @@ public class TestPrefixAndSuffixAwareTok
final MockTokenizer input = new MockTokenizer(MockTokenizer.WHITESPACE, false);
input.setReader(new StringReader("hello world"));
PrefixAndSuffixAwareTokenFilter ts = new PrefixAndSuffixAwareTokenFilter(
- new SingleTokenTokenStream(createToken("^", 0, 0)),
+ new CannedTokenStream(createToken("^", 0, 0)),
input,
- new SingleTokenTokenStream(createToken("$", 0, 0)));
+ new CannedTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
new String[] { "^", "hello", "world", "$" },
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java Mon Dec 1 17:25:39 2014
@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.misce
*/
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Token;
@@ -31,8 +32,8 @@ public class TestPrefixAwareTokenFilter
PrefixAwareTokenFilter ts;
ts = new PrefixAwareTokenFilter(
- new SingleTokenTokenStream(createToken("a", 0, 1)),
- new SingleTokenTokenStream(createToken("b", 0, 1)));
+ new CannedTokenStream(createToken("a", 0, 1)),
+ new CannedTokenStream(createToken("b", 0, 1)));
assertTokenStreamContents(ts,
new String[] { "a", "b" },
new int[] { 0, 1 },
@@ -42,9 +43,9 @@ public class TestPrefixAwareTokenFilter
final MockTokenizer suffix = new MockTokenizer(MockTokenizer.WHITESPACE, false);
suffix.setReader(new StringReader("hello world"));
- ts = new PrefixAwareTokenFilter(new SingleTokenTokenStream(createToken("^", 0, 0)),
+ ts = new PrefixAwareTokenFilter(new CannedTokenStream(createToken("^", 0, 0)),
suffix);
- ts = new PrefixAwareTokenFilter(ts, new SingleTokenTokenStream(createToken("$", 0, 0)));
+ ts = new PrefixAwareTokenFilter(ts, new CannedTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
new String[] { "^", "hello", "world", "$" },
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java Mon Dec 1 17:25:39 2014
@@ -39,7 +39,7 @@ import static org.apache.lucene.analysis
*/
public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
- /***
+ /*
public void testPerformance() throws IOException {
String s = "now is the time-for all good men to come to-the aid of their country.";
Token tok = new Token();
@@ -62,14 +62,14 @@ public class TestWordDelimiterFilter ext
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
// test that subwords and catenated subwords have
// the correct offsets.
- WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("foo-bar", 5, 12)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+ WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("foo-bar", 5, 12)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "foo", "foobar", "bar" },
new int[] { 5, 5, 9 },
new int[] { 8, 12, 12 });
- wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("foo-bar", 5, 6)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+ wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("foo-bar", 5, 6)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "foo", "bar", "foobar" },
@@ -80,7 +80,7 @@ public class TestWordDelimiterFilter ext
@Test
public void testOffsetChange() throws Exception {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
- WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("übelkeit)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+ WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("übelkeit)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "übelkeit" },
@@ -91,7 +91,7 @@ public class TestWordDelimiterFilter ext
@Test
public void testOffsetChange2() throws Exception {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
- WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(übelkeit", 7, 17)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+ WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("(übelkeit", 7, 17)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "übelkeit" },
@@ -102,7 +102,7 @@ public class TestWordDelimiterFilter ext
@Test
public void testOffsetChange3() throws Exception {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
- WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(übelkeit", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+ WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("(übelkeit", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "übelkeit" },
@@ -113,7 +113,7 @@ public class TestWordDelimiterFilter ext
@Test
public void testOffsetChange4() throws Exception {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
- WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(foo,bar)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+ WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("(foo,bar)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "foo", "foobar", "bar"},
Modified: lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java Mon Dec 1 17:25:39 2014
@@ -281,7 +281,7 @@ public class TestPatternReplaceCharFilte
* A demonstration of how backtracking regular expressions can lead to relatively
* easy DoS attacks.
*
- * @see "http://swtch.com/~rsc/regexp/regexp1.html"
+ * @see <a href="http://swtch.com/~rsc/regexp/regexp1.html">"http://swtch.com/~rsc/regexp/regexp1.html"</a>
*/
@Ignore
public void testNastyPattern() throws Exception {
Modified: lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationKeyAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationKeyAnalyzer.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationKeyAnalyzer.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationKeyAnalyzer.java Mon Dec 1 17:25:39 2014
@@ -78,14 +78,6 @@ public final class ICUCollationKeyAnalyz
this.factory = new ICUCollationAttributeFactory(collator);
}
- /**
- * @deprecated Use {@link #ICUCollationKeyAnalyzer(Collator)}
- */
- @Deprecated
- public ICUCollationKeyAnalyzer(Version matchVersion, Collator collator) {
- this.factory = new ICUCollationAttributeFactory(collator);
- }
-
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Modified: lucene/dev/branches/lucene2878/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java Mon Dec 1 17:25:39 2014
@@ -56,7 +56,7 @@ import java.util.regex.Pattern;
* <li>
* Converts round-trip mappings in nfc.txt (containing '=')
* that map to at least one [:Diacritic:] character
- * into one-way mappings ('>' instead of '=').
+ * into one-way mappings ('>' instead of '=').
* </li>
* </ol>
*/
Modified: lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilter.java Mon Dec 1 17:25:39 2014
@@ -28,8 +28,8 @@ import java.io.Reader;
* <p>
* Sequences of iteration marks are supported. In case an illegal sequence of iteration
* marks is encountered, the implementation emits the illegal source character as-is
- * without considering its script. For example, with input "?ゝ", we get
- * "??" even though "?" isn't hiragana.
+ * without considering its script. For example, with input "?ゝ", we get
+ * "??" even though "?" isn't hiragana.
* </p>
* <p>
* Note that a full stop punctuation character "。" (U+3002) can not be iterated
Modified: lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java Mon Dec 1 17:25:39 2014
@@ -62,8 +62,8 @@ import org.apache.lucene.util.fst.FST;
* <p>
* This tokenizer uses a rolling Viterbi search to find the
* least cost segmentation (path) of the incoming characters.
- * For tokens that appear to be compound (> length 2 for all
- * Kanji, or > length 7 for non-Kanji), we see if there is a
+ * For tokens that appear to be compound (> length 2 for all
+ * Kanji, or > length 7 for non-Kanji), we see if there is a
* 2nd best segmentation of that token after applying
* penalties to the long tokens. If so, and the Mode is
* {@link Mode#SEARCH}, we output the alternate segmentation
Modified: lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/Token.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/Token.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/Token.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/Token.java Mon Dec 1 17:25:39 2014
@@ -158,7 +158,7 @@ public class Token {
/**
* Set the position length (in tokens) of this token. For normal
- * tokens this is 1; for compound tokens it's > 1.
+ * tokens this is 1; for compound tokens it's > 1.
*/
public void setPositionLength(int positionLength) {
this.positionLength = positionLength;
@@ -166,7 +166,7 @@ public class Token {
/**
* Get the length (in tokens) of this token. For normal
- * tokens this is 1; for compound tokens it's > 1.
+ * tokens this is 1; for compound tokens it's > 1.
* @return position length of token
*/
public int getPositionLength() {
Modified: lucene/dev/branches/lucene2878/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java Mon Dec 1 17:25:39 2014
@@ -40,7 +40,7 @@ public class MorfologikAnalyzer extends
* and have an associated <code>.info</code> metadata file. See the Morfologik project
* for details.
*
- * @see "http://morfologik.blogspot.com/"
+ * @see <a href="http://morfologik.blogspot.com/">http://morfologik.blogspot.com/</a>
*/
public MorfologikAnalyzer(final String dictionaryResource) {
this.dictionary = dictionaryResource;
Modified: lucene/dev/branches/lucene2878/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java Mon Dec 1 17:25:39 2014
@@ -47,10 +47,6 @@ public class MorfologikFilterFactory ext
*/
private final String dictionaryResource;
- /** Schema attribute. */
- @Deprecated
- public static final String DICTIONARY_SCHEMA_ATTRIBUTE = "dictionary";
-
/** Dictionary resource */
public static final String DICTIONARY_RESOURCE_ATTRIBUTE = "dictionary-resource";
@@ -58,14 +54,6 @@ public class MorfologikFilterFactory ext
public MorfologikFilterFactory(Map<String,String> args) {
super(args);
- // Be specific about no-longer-supported dictionary attribute.
- String dictionaryName = get(args, DICTIONARY_SCHEMA_ATTRIBUTE);
- if (dictionaryName != null && !dictionaryName.isEmpty()) {
- throw new IllegalArgumentException("The " + DICTIONARY_SCHEMA_ATTRIBUTE + " attribute is no "
- + "longer supported (Morfologik now offers one unified Polish dictionary): " + dictionaryName
- + ". Perhaps you wanted to use 'dictionary-resource' attribute instead?");
- }
-
dictionaryResource = get(args, DICTIONARY_RESOURCE_ATTRIBUTE, DEFAULT_DICTIONARY_RESOURCE);
if (!args.isEmpty()) {
Modified: lucene/dev/branches/lucene2878/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java Mon Dec 1 17:25:39 2014
@@ -58,7 +58,7 @@ public class Utility {
* @param lstartIndex start offset into larray
* @param rarray right array
* @param rstartIndex start offset into rarray
- * @return 0 if the arrays are equalï¼1 if larray > rarray, -1 if larray < rarray
+ * @return 0 if the arrays are equalï¼1 if larray > rarray, -1 if larray < rarray
*/
public static int compareArray(char[] larray, int lstartIndex, char[] rarray,
int rstartIndex) {
Modified: lucene/dev/branches/lucene2878/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java Mon Dec 1 17:25:39 2014
@@ -207,24 +207,6 @@ public class TestSmartChineseAnalyzer ex
}
}
- // LUCENE-3642
- public void testInvalidOffset() throws Exception {
- Analyzer analyzer = new Analyzer() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName) {
- Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
- TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
- filters = new WordTokenFilter(filters);
- return new TokenStreamComponents(tokenizer, filters);
- }
- };
-
- assertAnalyzesTo(analyzer, "mosfellsbær",
- new String[] { "mosfellsbaer" },
- new int[] { 0 },
- new int[] { 11 });
- }
-
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
checkRandomData(random(), new SmartChineseAnalyzer(), 1000*RANDOM_MULTIPLIER);
@@ -235,16 +217,4 @@ public class TestSmartChineseAnalyzer ex
Random random = random();
checkRandomData(random, new SmartChineseAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
}
-
- public void testEmptyTerm() throws IOException {
- Random random = random();
- Analyzer a = new Analyzer() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName) {
- Tokenizer tokenizer = new KeywordTokenizer();
- return new TokenStreamComponents(tokenizer, new WordTokenFilter(tokenizer));
- }
- };
- checkAnalysisConsistency(random, a, random.nextBoolean(), "");
- }
}
Modified: lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java (original)
+++ lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java Mon Dec 1 17:25:39 2014
@@ -30,7 +30,7 @@ public interface QueryMaker {
/**
* Create the next query, of the given size.
* @param size the size of the query - number of terms, etc.
- * @exception Exception if cannot make the query, or if size>0 was specified but this feature is not supported.
+ * @exception Exception if cannot make the query, or if size > 0 was specified but this feature is not supported.
*/
public Query makeQuery (int size) throws Exception;
Modified: lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialFileQueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialFileQueryMaker.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialFileQueryMaker.java (original)
+++ lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SpatialFileQueryMaker.java Mon Dec 1 17:25:39 2014
@@ -39,7 +39,7 @@ import java.util.Properties;
* It's parsed by {@link com.spatial4j.core.context.SpatialContext#readShapeFromWkt(String)} (String)} and then
* further manipulated via a configurable {@link SpatialDocMaker.ShapeConverter}. When using point
* data, it's likely you'll want to configure the shape converter so that the query shapes actually
- * cover a region. The queries are all created & cached in advance. This query maker works in
+ * cover a region. The queries are all created and cached in advance. This query maker works in
* conjunction with {@link SpatialDocMaker}. See spatial.alg for a listing of options, in
* particular the options starting with "query.".
*/
Modified: lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java (original)
+++ lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java Mon Dec 1 17:25:39 2014
@@ -35,7 +35,7 @@ public class TaskStats implements Clonea
/** task start time */
private long start;
- /** task elapsed time. elapsed >= 0 indicates run completion! */
+ /** task elapsed time. elapsed >= 0 indicates run completion! */
private long elapsed = -1;
/** max tot mem during task */
Modified: lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java (original)
+++ lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java Mon Dec 1 17:25:39 2014
@@ -393,7 +393,7 @@ public class AnalyzerFactoryTask extends
/**
* This method looks up a class with its fully qualified name (FQN), or a short-name
* class-simplename, or with a package suffix, assuming "org.apache.lucene.analysis."
- * as the package prefix (e.g. "standard.ClassicTokenizerFactory" ->
+ * as the package prefix (e.g. "standard.ClassicTokenizerFactory" ->
* "org.apache.lucene.analysis.standard.ClassicTokenizerFactory").
*
* If className contains a period, the class is first looked up as-is, assuming that it
Modified: lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java (original)
+++ lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java Mon Dec 1 17:25:39 2014
@@ -99,7 +99,7 @@ public class NewAnalyzerTask extends Per
* <p/>
* Analyzer names may also refer to previously defined AnalyzerFactory's.
* <p/>
- * Example Declaration: {"NewAnalyzer" NewAnalyzer(WhitespaceAnalyzer, SimpleAnalyzer, StopAnalyzer, standard.StandardAnalyzer) >
+ * Example Declaration: {"NewAnalyzer" NewAnalyzer(WhitespaceAnalyzer, SimpleAnalyzer, StopAnalyzer, standard.StandardAnalyzer) >
* <p/>
* Example AnalyzerFactory usage:
* <pre>
Modified: lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (original)
+++ lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java Mon Dec 1 17:25:39 2014
@@ -268,7 +268,7 @@ public abstract class ReadTask extends P
}
/**
- * Return true if, with search & results traversing, docs should be retrieved.
+ * Return true if, with search and results traversing, docs should be retrieved.
*/
public abstract boolean withRetrieve();
Modified: lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java (original)
+++ lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java Mon Dec 1 17:25:39 2014
@@ -53,7 +53,7 @@ import java.util.Collections;
* <li>fields - The fields to highlight. If not specified all fields will be highlighted (or at least attempted)</li>
* </ul>
* Example:
- * <pre>"SearchHlgtSameRdr" SearchTravRetHighlight(size[10],highlight[10],mergeContiguous[true],maxFrags[3],fields[body]) > : 1000
+ * <pre>"SearchHlgtSameRdr" SearchTravRetHighlight(size[10],highlight[10],mergeContiguous[true],maxFrags[3],fields[body]) > : 1000
* </pre>
*
* Documents must be stored in order for this task to work. Additionally, term vector positions can be used as well.
Modified: lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java (original)
+++ lucene/dev/branches/lucene2878/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java Mon Dec 1 17:25:39 2014
@@ -47,7 +47,7 @@ import java.util.Collections;
* <li>fields - The fields to highlight. If not specified all fields will be highlighted (or at least attempted)</li>
* </ul>
* Example:
- * <pre>"SearchVecHlgtSameRdr" SearchTravRetVectorHighlight(size[10],highlight[10],maxFrags[3],fields[body]) > : 1000
+ * <pre>"SearchVecHlgtSameRdr" SearchTravRetVectorHighlight(size[10],highlight[10],maxFrags[3],fields[body]) > : 1000
* </pre>
*
* Fields must be stored and term vector offsets and positions in order must be true for this task to work.
Modified: lucene/dev/branches/lucene2878/lucene/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/build.xml?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/build.xml (original)
+++ lucene/dev/branches/lucene2878/lucene/build.xml Mon Dec 1 17:25:39 2014
@@ -60,6 +60,10 @@
description="Runs pitests (core, modules and back-compat)"
/>
+ <target name="beast">
+ <fail message="The Beast only works inside of individual modules"/>
+ </target>
+
<target name="compile-core" depends="compile-lucene-core"/>
<!-- lucene/test-framework is excluded from compilation -->
@@ -448,7 +452,7 @@
</sequential>
</target>
- <target name="test-modules" depends="compile-test">
+ <target name="test-modules">
<modules-crawl target="test" failonerror="true"/>
</target>
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/TermsIndexReaderBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/TermsIndexReaderBase.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/TermsIndexReaderBase.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/TermsIndexReaderBase.java Mon Dec 1 17:25:39 2014
@@ -55,7 +55,7 @@ public abstract class TermsIndexReaderBa
*/
public static abstract class FieldIndexEnum {
- /** Seeks to "largest" indexed term that's <=
+ /** Seeks to "largest" indexed term that's <=
* term; returns file pointer index (into the main
* terms index file) for that term */
public abstract long seek(BytesRef term) throws IOException;
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java Mon Dec 1 17:25:39 2014
@@ -109,7 +109,7 @@ public class VariableGapTermsIndexWriter
}
}
- /** Sets an index term when docFreq >= docFreqThresh, or
+ /** Sets an index term when docFreq >= docFreqThresh, or
* every interval terms. This should reduce seek time
* to high docFreq terms. */
public static final class EveryNOrDocFreqTermSelector extends IndexTermSelector {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java Mon Dec 1 17:25:39 2014
@@ -82,7 +82,7 @@ import org.apache.lucene.util.packed.Pac
/**
* This is just like {@link BlockTreeTermsWriter}, except it also stores a version per term, and adds a method to its TermsEnum
- * implementation to seekExact only if the version is >= the specified version. The version is added to the terms index to avoid seeking if
+ * implementation to seekExact only if the version is >= the specified version. The version is added to the terms index to avoid seeking if
* no term in the block has a high enough version. The term blocks file is .tiv and the terms index extension is .tipv.
*
* @lucene.experimental
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java Mon Dec 1 17:25:39 2014
@@ -55,9 +55,9 @@ import org.apache.lucene.util.automaton.
/** Wraps {@link Lucene50PostingsFormat} format for on-disk
* storage, but then at read time loads and stores all
- * terms & postings directly in RAM as byte[], int[].
+ * terms and postings directly in RAM as byte[], int[].
*
- * <p><b><font color=red>WARNING</font></b>: This is
+ * <p><b>WARNING</b>: This is
* exceptionally RAM intensive: it makes no effort to
* compress the postings data, storing terms as separate
* byte[] and postings as separate int[], but as a result it
@@ -90,7 +90,7 @@ public final class DirectPostingsFormat
/** minSkipCount is how many terms in a row must have the
* same prefix before we put a skip pointer down. Terms
- * with docFreq <= lowFreqCutoff will use a single int[]
+ * with docFreq <= lowFreqCutoff will use a single int[]
* to hold all docs, freqs, position and offsets; terms
* with higher docFreq will use separate arrays. */
public DirectPostingsFormat(int minSkipCount, int lowFreqCutoff) {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java Mon Dec 1 17:25:39 2014
@@ -748,7 +748,7 @@ public class FSTOrdTermsReader extends F
}
/** Load frame for target arc(node) on fst, so that
- * arc.label >= label and !fsa.reject(arc.label) */
+ * arc.label >= label and !fsa.reject(arc.label) */
Frame loadCeilFrame(int label, Frame top, Frame frame) throws IOException {
FST.Arc<Long> arc = frame.arc;
arc = Util.readCeilArc(label, fst, top.arc, arc, fstReader);
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java Mon Dec 1 17:25:39 2014
@@ -645,7 +645,7 @@ public class FSTTermsReader extends Fiel
}
/** Load frame for target arc(node) on fst, so that
- * arc.label >= label and !fsa.reject(arc.label) */
+ * arc.label >= label and !fsa.reject(arc.label) */
Frame loadCeilFrame(int label, Frame top, Frame frame) throws IOException {
FST.Arc<FSTTermOutputs.TermData> arc = frame.fstArc;
arc = Util.readCeilArc(label, fst, top.fstArc, arc, fstReader);
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java Mon Dec 1 17:25:39 2014
@@ -68,7 +68,7 @@ import org.apache.lucene.util.packed.Pac
// it pulls the FST directly from what you wrote w/o going
// to disk.
-/** Stores terms & postings (docs, positions, payloads) in
+/** Stores terms and postings (docs, positions, payloads) in
* RAM, using an FST.
*
* <p>Note that this codec implements advance as a linear
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java Mon Dec 1 17:25:39 2014
@@ -31,7 +31,7 @@ import org.apache.lucene.codecs.TermVect
/**
* plain text index format.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public final class SimpleTextCodec extends Codec {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCompoundFormat.java Mon Dec 1 17:25:39 2014
@@ -44,7 +44,7 @@ import org.apache.lucene.util.StringHelp
/**
* plain text compound format.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextCompoundFormat extends CompoundFormat {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesFormat.java Mon Dec 1 17:25:39 2014
@@ -28,7 +28,7 @@ import org.apache.lucene.index.SegmentWr
/**
* plain text doc values format.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* <p>
* the .dat file contains the data.
* for numbers this is a "fixed-width" file, for example a single byte range:
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java Mon Dec 1 17:25:39 2014
@@ -42,7 +42,7 @@ import org.apache.lucene.util.StringHelp
/**
* plaintext field infos format
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextLiveDocsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextLiveDocsFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextLiveDocsFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextLiveDocsFormat.java Mon Dec 1 17:25:39 2014
@@ -42,7 +42,7 @@ import org.apache.lucene.util.UnicodeUti
/**
* reads/writes plaintext live docs
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextLiveDocsFormat extends LiveDocsFormat {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsFormat.java Mon Dec 1 17:25:39 2014
@@ -33,7 +33,7 @@ import org.apache.lucene.util.Accountabl
/**
* plain-text norms format.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
*
* @lucene.experimental
*/
@@ -53,7 +53,7 @@ public class SimpleTextNormsFormat exten
/**
* Reads plain-text norms.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
*
* @lucene.experimental
*/
@@ -100,7 +100,7 @@ public class SimpleTextNormsFormat exten
/**
* Writes plain-text norms.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
*
* @lucene.experimental
*/
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java Mon Dec 1 17:25:39 2014
@@ -36,14 +36,13 @@ import org.apache.lucene.store.IOContext
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.Version;
/**
* plain text segments file format.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextSegmentInfoFormat extends SegmentInfoFormat {
@@ -63,9 +62,7 @@ public class SimpleTextSegmentInfoFormat
public SegmentInfo read(Directory directory, String segmentName, byte[] segmentID, IOContext context) throws IOException {
BytesRefBuilder scratch = new BytesRefBuilder();
String segFileName = IndexFileNames.segmentFileName(segmentName, "", SimpleTextSegmentInfoFormat.SI_EXTENSION);
- ChecksumIndexInput input = directory.openChecksumInput(segFileName, context);
- boolean success = false;
- try {
+ try (ChecksumIndexInput input = directory.openChecksumInput(segFileName, context)) {
SimpleTextUtil.readLine(input, scratch);
assert StringHelper.startsWith(scratch.get(), SI_VERSION);
final Version version;
@@ -125,14 +122,7 @@ public class SimpleTextSegmentInfoFormat
SegmentInfo info = new SegmentInfo(directory, version, segmentName, docCount,
isCompoundFile, null, diagnostics, id);
info.setFiles(files);
- success = true;
return info;
- } finally {
- if (!success) {
- IOUtils.closeWhileHandlingException(input);
- } else {
- input.close();
- }
}
}
@@ -146,10 +136,7 @@ public class SimpleTextSegmentInfoFormat
String segFileName = IndexFileNames.segmentFileName(si.name, "", SimpleTextSegmentInfoFormat.SI_EXTENSION);
si.addFile(segFileName);
- boolean success = false;
- IndexOutput output = dir.createOutput(segFileName, ioContext);
-
- try {
+ try (IndexOutput output = dir.createOutput(segFileName, ioContext)) {
BytesRefBuilder scratch = new BytesRefBuilder();
SimpleTextUtil.write(output, SI_VERSION);
@@ -201,14 +188,6 @@ public class SimpleTextSegmentInfoFormat
SimpleTextUtil.writeNewline(output);
SimpleTextUtil.writeChecksum(output, scratch);
- success = true;
- } finally {
- if (!success) {
- IOUtils.closeWhileHandlingException(output);
- IOUtils.deleteFilesIgnoringExceptions(dir, segFileName);
- } else {
- output.close();
- }
}
}
}
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsFormat.java Mon Dec 1 17:25:39 2014
@@ -30,7 +30,7 @@ import org.apache.lucene.store.IOContext
/**
* plain text stored fields format.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextStoredFieldsFormat extends StoredFieldsFormat {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java Mon Dec 1 17:25:39 2014
@@ -46,7 +46,7 @@ import static org.apache.lucene.codecs.s
/**
* reads plaintext stored fields
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextStoredFieldsReader extends StoredFieldsReader {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java Mon Dec 1 17:25:39 2014
@@ -34,7 +34,7 @@ import org.apache.lucene.util.IOUtils;
/**
* Writes plain-text stored fields.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextStoredFieldsWriter extends StoredFieldsWriter {
@@ -70,7 +70,7 @@ public class SimpleTextStoredFieldsWrite
success = true;
} finally {
if (!success) {
- abort();
+ IOUtils.closeWhileHandlingException(this);
}
}
}
@@ -152,14 +152,6 @@ public class SimpleTextStoredFieldsWrite
}
@Override
- public void abort() {
- try {
- close();
- } catch (Throwable ignored) {}
- IOUtils.deleteFilesIgnoringExceptions(directory, IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION));
- }
-
- @Override
public void finish(FieldInfos fis, int numDocs) throws IOException {
if (numDocsWritten != numDocs) {
throw new RuntimeException("mergeFields produced an invalid result: docCount is " + numDocs
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsFormat.java Mon Dec 1 17:25:39 2014
@@ -30,7 +30,7 @@ import org.apache.lucene.store.IOContext
/**
* plain text term vectors format.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextTermVectorsFormat extends TermVectorsFormat {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java Mon Dec 1 17:25:39 2014
@@ -67,7 +67,7 @@ import static org.apache.lucene.codecs.s
/**
* Reads plain-text term vectors.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextTermVectorsReader extends TermVectorsReader {
Modified: lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsWriter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsWriter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsWriter.java Mon Dec 1 17:25:39 2014
@@ -33,7 +33,7 @@ import org.apache.lucene.util.IOUtils;
/**
* Writes plain-text term vectors.
* <p>
- * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+ * <b>FOR RECREATIONAL USE ONLY</b>
* @lucene.experimental
*/
public class SimpleTextTermVectorsWriter extends TermVectorsWriter {
@@ -74,7 +74,7 @@ public class SimpleTextTermVectorsWriter
success = true;
} finally {
if (!success) {
- abort();
+ IOUtils.closeWhileHandlingException(this);
}
}
}
@@ -164,14 +164,6 @@ public class SimpleTextTermVectorsWriter
}
@Override
- public void abort() {
- try {
- close();
- } catch (Throwable ignored) {}
- IOUtils.deleteFilesIgnoringExceptions(directory, IndexFileNames.segmentFileName(segment, "", VECTORS_EXTENSION));
- }
-
- @Override
public void finish(FieldInfos fis, int numDocs) throws IOException {
if (numDocsWritten != numDocs) {
throw new RuntimeException("mergeVectors produced an invalid result: mergedDocs is " + numDocs + " but vec numDocs is " + numDocsWritten + " file=" + out.toString() + "; now aborting this merge to prevent index corruption");
Modified: lucene/dev/branches/lucene2878/lucene/common-build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/common-build.xml?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/common-build.xml (original)
+++ lucene/dev/branches/lucene2878/lucene/common-build.xml Mon Dec 1 17:25:39 2014
@@ -164,7 +164,7 @@
<property name="javac.debug" value="on"/>
<property name="javac.source" value="1.8"/>
<property name="javac.target" value="1.8"/>
- <property name="javac.args" value="-Xlint -Xlint:-deprecation -Xlint:-serial -Xlint:-options"/>
+ <property name="javac.args" value="-Xlint -Xlint:-deprecation -Xlint:-serial -Xlint:-options -Xdoclint:all/protected -Xdoclint:-html -Xdoclint:-missing"/>
<property name="javadoc.link" value="http://download.oracle.com/javase/8/docs/api/"/>
<property name="javadoc.link.junit" value="http://junit.sourceforge.net/javadoc/"/>
<property name="javadoc.packagelist.dir" location="${common.dir}/tools/javadoc"/>
@@ -372,8 +372,8 @@
<echo level="warning" message="WARN: Linting documentation HTML is not supported on this Java version (${build.java.runtime}) / JVM (${java.vm.name}). NOTHING DONE!"/>
</target>
- <!-- for now disable doclint: -->
- <property name="javadoc.args" value="-Xdoclint:none"/>
+ <!-- for now enable only some doclint: -->
+ <property name="javadoc.args" value="-Xdoclint:all -Xdoclint:-html -Xdoclint:-missing"/>
<!-- Import custom ANT tasks. -->
<import file="${common.dir}/tools/custom-tasks.xml" />
@@ -1351,6 +1351,11 @@ ${tests-output}/junit4-*.suites - pe
<!-- Beast the actual tests (must be wrapped with -init-totals, -check-totals) -->
<target name="-beast" depends="resolve-groovy">
+ <fail message="The Beast only works inside of individual modules (where 'junit.classpath' is defined)">
+ <condition>
+ <not><isreference refid="junit.classpath"/></not>
+ </condition>
+ </fail>
<groovy taskname="beaster"><![CDATA[
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.BuildLogger;
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/Token.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/Token.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/Token.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/Token.java Mon Dec 1 17:25:39 2014
@@ -79,8 +79,8 @@ public class Token extends PackedTokenAt
public Token() {
}
- /** Constructs a Token with the given term text, and start
- * & end offsets. The type defaults to "word."
+ /** Constructs a Token with the given term text, start
+ * and end offsets. The type defaults to "word."
* <b>NOTE:</b> for better indexing speed you should
* instead use the char[] termBuffer methods to set the
* term text.
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java Mon Dec 1 17:25:39 2014
@@ -78,7 +78,7 @@ public class TokenStreamToAutomaton {
}
}
- /** Subclass & implement this if you need to change the
+ /** Subclass and implement this if you need to change the
* token (such as escaping certain bytes) before it's
* turned into a graph. */
protected BytesRef changeToken(BytesRef in) {
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttribute.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttribute.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttribute.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttribute.java Mon Dec 1 17:25:39 2014
@@ -48,7 +48,7 @@ public interface CharTermAttribute exten
/** Grows the termBuffer to at least size newSize, preserving the
* existing content.
* @param newSize minimum size of the new termBuffer
- * @return newly created termBuffer with length >= newSize
+ * @return newly created termBuffer with {@code length >= newSize}
*/
public char[] resizeBuffer(int newSize);
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java Mon Dec 1 17:25:39 2014
@@ -178,7 +178,7 @@ public final class CodecUtil {
* @param maxVersion The maximum supported expected version number.
* @return The actual version found, when a valid header is found
* that matches <code>codec</code>, with an actual version
- * where <code>minVersion <= actual <= maxVersion</code>.
+ * where {@code minVersion <= actual <= maxVersion}.
* Otherwise an exception is thrown.
* @throws CorruptIndexException If the first four bytes are not
* {@link #CODEC_MAGIC}, or if the actual codec found is
@@ -238,7 +238,7 @@ public final class CodecUtil {
* @param expectedSuffix The expected auxiliary suffix for this file.
* @return The actual version found, when a valid header is found
* that matches <code>codec</code>, with an actual version
- * where <code>minVersion <= actual <= maxVersion</code>,
+ * where {@code minVersion <= actual <= maxVersion},
* and matching <code>expectedID</code> and <code>expectedSuffix</code>
* Otherwise an exception is thrown.
* @throws CorruptIndexException If the first four bytes are not
@@ -422,18 +422,6 @@ public final class CodecUtil {
}
}
- /**
- * Checks that the stream is positioned at the end, and throws exception
- * if it is not.
- * @deprecated Use {@link #checkFooter} instead, this should only used for files without checksums
- */
- @Deprecated
- public static void checkEOF(IndexInput in) throws IOException {
- if (in.getFilePointer() != in.length()) {
- throw new CorruptIndexException("did not read all bytes from file: read " + in.getFilePointer() + " vs size " + in.length(), in);
- }
- }
-
/**
* Clones the provided input, reads all bytes from the file, and calls {@link #checkFooter}
* <p>
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListWriter.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListWriter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListWriter.java Mon Dec 1 17:25:39 2014
@@ -42,7 +42,7 @@ import org.apache.lucene.util.MathUtil;
* Skip level i contains every skipInterval-th entry from skip level i-1.
* Therefore the number of entries on level i is: floor(df / ((skipInterval ^ (i + 1))).
*
- * Each skip entry on a level i>0 contains a pointer to the corresponding skip entry in list i-1.
+ * Each skip entry on a level {@code i>0} contains a pointer to the corresponding skip entry in list i-1.
* This guarantees a logarithmic amount of skips to find the target document.
*
* While this class takes care of writing the different skip levels,
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java Mon Dec 1 17:25:39 2014
@@ -180,7 +180,7 @@ public abstract class PushPostingsWriter
* for the field. */
public abstract void startDoc(int docID, int freq) throws IOException;
- /** Add a new position & payload, and start/end offset. A
+ /** Add a new position and payload, and start/end offset. A
* null payload means no payload; a non-null payload with
* zero length also means no payload. Caller may reuse
* the {@link BytesRef} for the payload between calls
@@ -188,7 +188,7 @@ public abstract class PushPostingsWriter
* and <code>endOffset</code> will be -1 when offsets are not indexed. */
public abstract void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException;
- /** Called when we are done adding positions & payloads
+ /** Called when we are done adding positions and payloads
* for each doc. */
public abstract void finishDoc() throws IOException;
}
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java?rev=1642718&r1=1642717&r2=1642718&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java Mon Dec 1 17:25:39 2014
@@ -48,7 +48,8 @@ public abstract class SegmentInfoFormat
public abstract SegmentInfo read(Directory directory, String segmentName, byte segmentID[], IOContext context) throws IOException;
/**
- * Write {@link SegmentInfo} data.
+ * Write {@link SegmentInfo} data.
+ * The codec must add its SegmentInfo filename(s) to {@code info} before doing i/o.
* @throws IOException If an I/O error occurs
*/
public abstract void write(Directory dir, SegmentInfo info, IOContext ioContext) throws IOException;