You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by mi...@apache.org on 2014/03/16 20:39:37 UTC
svn commit: r1578144 [19/37] - in /lucene/dev/branches/lucene5376_2: ./
dev-tools/ dev-tools/idea/.idea/libraries/
dev-tools/idea/solr/contrib/dataimporthandler/
dev-tools/idea/solr/contrib/map-reduce/ dev-tools/idea/solr/core/src/test/
dev-tools/scrip...
Modified: lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java Sun Mar 16 19:39:10 2014
@@ -648,9 +648,9 @@ public class AnalyzingSuggesterTest exte
int numQueries = atLeast(1000);
- final List<TermFreq2> slowCompletor = new ArrayList<TermFreq2>();
- final TreeSet<String> allPrefixes = new TreeSet<String>();
- final Set<String> seen = new HashSet<String>();
+ final List<TermFreq2> slowCompletor = new ArrayList<>();
+ final TreeSet<String> allPrefixes = new TreeSet<>();
+ final Set<String> seen = new HashSet<>();
boolean doPayloads = random().nextBoolean();
@@ -742,7 +742,7 @@ public class AnalyzingSuggesterTest exte
if (VERBOSE) {
// Don't just sort original list, to avoid VERBOSE
// altering the test:
- List<TermFreq2> sorted = new ArrayList<TermFreq2>(slowCompletor);
+ List<TermFreq2> sorted = new ArrayList<>(slowCompletor);
Collections.sort(sorted);
for(TermFreq2 ent : sorted) {
System.out.println(" surface='" + ent.surfaceForm + "' analyzed='" + ent.analyzedForm + "' weight=" + ent.weight);
@@ -768,7 +768,7 @@ public class AnalyzingSuggesterTest exte
List<LookupResult> r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN);
// 2. go thru whole set to find suggestions:
- List<TermFreq2> matches = new ArrayList<TermFreq2>();
+ List<TermFreq2> matches = new ArrayList<>();
// "Analyze" the key:
String[] tokens = prefix.split(" ");
@@ -1194,7 +1194,7 @@ public class AnalyzingSuggesterTest exte
@SafeVarargs
public final <T> Iterable<T> shuffle(T...values) {
- final List<T> asList = new ArrayList<T>(values.length);
+ final List<T> asList = new ArrayList<>(values.length);
for (T value : values) {
asList.add(value);
}
Modified: lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java Sun Mar 16 19:39:10 2014
@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.util.C
import org.apache.lucene.search.suggest.Input;
import org.apache.lucene.search.suggest.InputArrayIterator;
import org.apache.lucene.search.suggest.Lookup;
-import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
@@ -49,15 +48,10 @@ public class BlendedInfixSuggesterTest e
File tempDir = TestUtil.getTempDir("BlendedInfixSuggesterTest");
Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET);
- BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a,
- AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
- BlendedInfixSuggester.BlenderType.POSITION_LINEAR,
- BlendedInfixSuggester.DEFAULT_NUM_FACTOR) {
- @Override
- protected Directory getDirectory(File path) {
- return newFSDirectory(path);
- }
- };
+ BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
+ AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
+ BlendedInfixSuggester.BlenderType.POSITION_LINEAR,
+ BlendedInfixSuggester.DEFAULT_NUM_FACTOR);
suggester.build(new InputArrayIterator(keys));
// we query for star wars and check that the weight
@@ -94,12 +88,7 @@ public class BlendedInfixSuggesterTest e
Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET);
// BlenderType.LINEAR is used by default (remove position*10%)
- BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, tempDir, a) {
- @Override
- protected Directory getDirectory(File path) {
- return newFSDirectory(path);
- }
- };
+ BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a);
suggester.build(new InputArrayIterator(keys));
assertEquals(w, getInResults(suggester, "top", pl, 1));
@@ -109,13 +98,8 @@ public class BlendedInfixSuggesterTest e
suggester.close();
// BlenderType.RECIPROCAL is using 1/(1+p) * w where w is weight and p the position of the word
- suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a,
- AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1) {
- @Override
- protected Directory getDirectory(File path) {
- return newFSDirectory(path);
- }
- };
+ suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
+ AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1);
suggester.build(new InputArrayIterator(keys));
assertEquals(w, getInResults(suggester, "top", pl, 1));
@@ -145,13 +129,8 @@ public class BlendedInfixSuggesterTest e
Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET);
// if factor is small, we don't get the expected element
- BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a,
- AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1) {
- @Override
- protected Directory getDirectory(File path) {
- return newFSDirectory(path);
- }
- };
+ BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
+ AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1);
suggester.build(new InputArrayIterator(keys));
@@ -169,13 +148,8 @@ public class BlendedInfixSuggesterTest e
suggester.close();
// if we increase the factor we have it
- suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a,
- AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 2) {
- @Override
- protected Directory getDirectory(File path) {
- return newFSDirectory(path);
- }
- };
+ suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
+ AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 2);
suggester.build(new InputArrayIterator(keys));
// we have it
@@ -205,14 +179,9 @@ public class BlendedInfixSuggesterTest e
Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET);
// if factor is small, we don't get the expected element
- BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, tempDir, a, a,
- AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL,
- BlendedInfixSuggester.DEFAULT_NUM_FACTOR) {
- @Override
- protected Directory getDirectory(File path) {
- return newFSDirectory(path);
- }
- };
+ BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
+ AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS, BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL,
+ BlendedInfixSuggester.DEFAULT_NUM_FACTOR);
suggester.build(new InputArrayIterator(keys));
Modified: lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java Sun Mar 16 19:39:10 2014
@@ -54,7 +54,7 @@ import org.apache.lucene.util.fst.Util;
public class FuzzySuggesterTest extends LuceneTestCase {
public void testRandomEdits() throws IOException {
- List<Input> keys = new ArrayList<Input>();
+ List<Input> keys = new ArrayList<>();
int numTerms = atLeast(100);
for (int i = 0; i < numTerms; i++) {
keys.add(new Input("boo" + TestUtil.randomSimpleString(random()), 1 + random().nextInt(100)));
@@ -75,7 +75,7 @@ public class FuzzySuggesterTest extends
}
public void testNonLatinRandomEdits() throws IOException {
- List<Input> keys = new ArrayList<Input>();
+ List<Input> keys = new ArrayList<>();
int numTerms = atLeast(100);
for (int i = 0; i < numTerms; i++) {
keys.add(new Input("буу" + TestUtil.randomSimpleString(random()), 1 + random().nextInt(100)));
@@ -596,9 +596,9 @@ public class FuzzySuggesterTest extends
int numQueries = atLeast(100);
- final List<TermFreqPayload2> slowCompletor = new ArrayList<TermFreqPayload2>();
- final TreeSet<String> allPrefixes = new TreeSet<String>();
- final Set<String> seen = new HashSet<String>();
+ final List<TermFreqPayload2> slowCompletor = new ArrayList<>();
+ final TreeSet<String> allPrefixes = new TreeSet<>();
+ final Set<String> seen = new HashSet<>();
Input[] keys = new Input[numQueries];
@@ -674,7 +674,7 @@ public class FuzzySuggesterTest extends
if (VERBOSE) {
// Don't just sort original list, to avoid VERBOSE
// altering the test:
- List<TermFreqPayload2> sorted = new ArrayList<TermFreqPayload2>(slowCompletor);
+ List<TermFreqPayload2> sorted = new ArrayList<>(slowCompletor);
Collections.sort(sorted);
for(TermFreqPayload2 ent : sorted) {
System.out.println(" surface='" + ent.surfaceForm + " analyzed='" + ent.analyzedForm + "' weight=" + ent.weight);
@@ -696,7 +696,7 @@ public class FuzzySuggesterTest extends
List<LookupResult> r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN);
// 2. go thru whole set to find suggestions:
- List<LookupResult> matches = new ArrayList<LookupResult>();
+ List<LookupResult> matches = new ArrayList<>();
// "Analyze" the key:
String[] tokens = prefix.split(" ");
@@ -929,8 +929,8 @@ public class FuzzySuggesterTest extends
public void testRandom2() throws Throwable {
final int NUM = atLeast(200);
- final List<Input> answers = new ArrayList<Input>();
- final Set<String> seen = new HashSet<String>();
+ final List<Input> answers = new ArrayList<>();
+ final Set<String> seen = new HashSet<>();
for(int i=0;i<NUM;i++) {
final String s = randomSimpleString(8);
if (!seen.contains(s)) {
@@ -1005,7 +1005,7 @@ public class FuzzySuggesterTest extends
}
private List<LookupResult> slowFuzzyMatch(int prefixLen, int maxEdits, boolean allowTransposition, List<Input> answers, String frag) {
- final List<LookupResult> results = new ArrayList<LookupResult>();
+ final List<LookupResult> results = new ArrayList<>();
final int fragLen = frag.length();
for(Input tf : answers) {
//System.out.println(" check s=" + tf.term.utf8ToString());
Modified: lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java Sun Mar 16 19:39:10 2014
@@ -293,7 +293,7 @@ public class TestFreeTextSuggester exten
public void testRandom() throws IOException {
String[] terms = new String[TestUtil.nextInt(random(), 2, 10)];
- Set<String> seen = new HashSet<String>();
+ Set<String> seen = new HashSet<>();
while (seen.size() < terms.length) {
String token = TestUtil.randomSimpleString(random(), 1, 5);
if (!seen.contains(token)) {
@@ -367,12 +367,12 @@ public class TestFreeTextSuggester exten
});
// Build inefficient but hopefully correct model:
- List<Map<String,Integer>> gramCounts = new ArrayList<Map<String,Integer>>(grams);
+ List<Map<String,Integer>> gramCounts = new ArrayList<>(grams);
for(int gram=0;gram<grams;gram++) {
if (VERBOSE) {
System.out.println("TEST: build model for gram=" + gram);
}
- Map<String,Integer> model = new HashMap<String,Integer>();
+ Map<String,Integer> model = new HashMap<>();
gramCounts.add(model);
for(String[] doc : docs) {
for(int i=0;i<doc.length-gram;i++) {
@@ -429,9 +429,9 @@ public class TestFreeTextSuggester exten
}
// Expected:
- List<LookupResult> expected = new ArrayList<LookupResult>();
+ List<LookupResult> expected = new ArrayList<>();
double backoff = 1.0;
- seen = new HashSet<String>();
+ seen = new HashSet<>();
if (VERBOSE) {
System.out.println(" compute expected");
@@ -494,7 +494,7 @@ public class TestFreeTextSuggester exten
if (VERBOSE) {
System.out.println(" find terms w/ prefix=" + tokens[tokens.length-1]);
}
- List<LookupResult> tmp = new ArrayList<LookupResult>();
+ List<LookupResult> tmp = new ArrayList<>();
for(String term : terms) {
if (term.startsWith(tokens[tokens.length-1])) {
if (VERBOSE) {
@@ -587,7 +587,7 @@ public class TestFreeTextSuggester exten
@SafeVarargs
private final <T> Iterable<T> shuffle(T...values) {
- final List<T> asList = new ArrayList<T>(values.length);
+ final List<T> asList = new ArrayList<>(values.length);
for (T value : values) {
asList.add(value);
}
Modified: lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java Sun Mar 16 19:39:10 2014
@@ -157,7 +157,7 @@ public class FSTCompletionTest extends L
FSTCompletionLookup lookup = new FSTCompletionLookup(10, true);
Random r = random();
- List<Input> keys = new ArrayList<Input>();
+ List<Input> keys = new ArrayList<>();
for (int i = 0; i < 5000; i++) {
keys.add(new Input(TestUtil.randomSimpleString(r), -1));
}
@@ -199,7 +199,7 @@ public class FSTCompletionTest extends L
}
public void testRandom() throws Exception {
- List<Input> freqs = new ArrayList<Input>();
+ List<Input> freqs = new ArrayList<>();
Random rnd = random();
for (int i = 0; i < 2500 + rnd.nextInt(2500); i++) {
int weight = rnd.nextInt(100);
Modified: lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java Sun Mar 16 19:39:10 2014
@@ -128,8 +128,8 @@ public class WFSTCompletionTest extends
public void testRandom() throws Exception {
int numWords = atLeast(1000);
- final TreeMap<String,Long> slowCompletor = new TreeMap<String,Long>();
- final TreeSet<String> allPrefixes = new TreeSet<String>();
+ final TreeMap<String,Long> slowCompletor = new TreeMap<>();
+ final TreeSet<String> allPrefixes = new TreeSet<>();
Input[] keys = new Input[numWords];
@@ -163,7 +163,7 @@ public class WFSTCompletionTest extends
List<LookupResult> r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random), false, topN);
// 2. go thru whole treemap (slowCompletor) and check its actually the best suggestion
- final List<LookupResult> matches = new ArrayList<LookupResult>();
+ final List<LookupResult> matches = new ArrayList<>();
// TODO: could be faster... but its slowCompletor for a reason
for (Map.Entry<String,Long> e : slowCompletor.entrySet()) {
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java Sun Mar 16 19:39:10 2014
@@ -154,8 +154,8 @@ public abstract class BaseTokenStreamTes
}
// Maps position to the start/end offset:
- final Map<Integer,Integer> posToStartOffset = new HashMap<Integer,Integer>();
- final Map<Integer,Integer> posToEndOffset = new HashMap<Integer,Integer>();
+ final Map<Integer,Integer> posToStartOffset = new HashMap<>();
+ final Map<Integer,Integer> posToEndOffset = new HashMap<>();
ts.reset();
int pos = -1;
@@ -682,12 +682,12 @@ public abstract class BaseTokenStreamTes
PositionIncrementAttribute posIncAtt = ts.hasAttribute(PositionIncrementAttribute.class) ? ts.getAttribute(PositionIncrementAttribute.class) : null;
PositionLengthAttribute posLengthAtt = ts.hasAttribute(PositionLengthAttribute.class) ? ts.getAttribute(PositionLengthAttribute.class) : null;
TypeAttribute typeAtt = ts.hasAttribute(TypeAttribute.class) ? ts.getAttribute(TypeAttribute.class) : null;
- List<String> tokens = new ArrayList<String>();
- List<String> types = new ArrayList<String>();
- List<Integer> positions = new ArrayList<Integer>();
- List<Integer> positionLengths = new ArrayList<Integer>();
- List<Integer> startOffsets = new ArrayList<Integer>();
- List<Integer> endOffsets = new ArrayList<Integer>();
+ List<String> tokens = new ArrayList<>();
+ List<String> types = new ArrayList<>();
+ List<Integer> positions = new ArrayList<>();
+ List<Integer> positionLengths = new ArrayList<>();
+ List<Integer> startOffsets = new ArrayList<>();
+ List<Integer> endOffsets = new ArrayList<>();
ts.reset();
// First pass: save away "correct" tokens
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java Sun Mar 16 19:39:10 2014
@@ -251,7 +251,7 @@ public abstract class CollationTestBase
public void assertThreadSafe(final Analyzer analyzer) throws Exception {
int numTestPoints = 100;
int numThreads = TestUtil.nextInt(random(), 3, 5);
- final HashMap<String,BytesRef> map = new HashMap<String,BytesRef>();
+ final HashMap<String,BytesRef> map = new HashMap<>();
// create a map<String,SortKey> up front.
// then with multiple threads, generate sort keys for all the keys in the map
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java Sun Mar 16 19:39:10 2014
@@ -59,7 +59,7 @@ public abstract class LookaheadTokenFilt
* to record other state at each position. */
protected static class Position implements RollingBuffer.Resettable {
// Buffered input tokens at this position:
- public final List<AttributeSource.State> inputTokens = new ArrayList<AttributeSource.State>();
+ public final List<AttributeSource.State> inputTokens = new ArrayList<>();
// Next buffered token to be returned to consumer:
public int nextRead;
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java Sun Mar 16 19:39:10 2014
@@ -48,7 +48,7 @@ public final class MockAnalyzer extends
private int positionIncrementGap;
private Integer offsetGap;
private final Random random;
- private Map<String,Integer> previousMappings = new HashMap<String,Integer>();
+ private Map<String,Integer> previousMappings = new HashMap<>();
private boolean enableChecks = true;
private int maxTokenLength = MockTokenizer.DEFAULT_MAX_TOKEN_LENGTH;
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/MockCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/MockCharFilter.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/MockCharFilter.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/MockCharFilter.java Sun Mar 16 19:39:10 2014
@@ -100,5 +100,5 @@ public class MockCharFilter extends Char
corrections.put(off, cumulativeDiff);
}
- TreeMap<Integer,Integer> corrections = new TreeMap<Integer,Integer>();
+ TreeMap<Integer,Integer> corrections = new TreeMap<>();
}
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java Sun Mar 16 19:39:10 2014
@@ -44,8 +44,8 @@ public final class ValidatingTokenFilter
private int lastStartOffset;
// Maps position to the start/end offset:
- private final Map<Integer,Integer> posToStartOffset = new HashMap<Integer,Integer>();
- private final Map<Integer,Integer> posToEndOffset = new HashMap<Integer,Integer>();
+ private final Map<Integer,Integer> posToStartOffset = new HashMap<>();
+ private final Map<Integer,Integer> posToEndOffset = new HashMap<>();
private final PositionIncrementAttribute posIncAtt = getAttrIfExists(PositionIncrementAttribute.class);
private final PositionLengthAttribute posLenAtt = getAttrIfExists(PositionLengthAttribute.class);
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesWriter.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesWriter.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesWriter.java Sun Mar 16 19:39:10 2014
@@ -155,7 +155,7 @@ class Lucene40DocValuesWriter extends Do
@Override
public void addBinaryField(FieldInfo field, Iterable<BytesRef> values) throws IOException {
// examine the values to determine best type to use
- HashSet<BytesRef> uniqueValues = new HashSet<BytesRef>();
+ HashSet<BytesRef> uniqueValues = new HashSet<>();
int minLength = Integer.MAX_VALUE;
int maxLength = Integer.MIN_VALUE;
for (BytesRef b : values) {
@@ -314,7 +314,7 @@ class Lucene40DocValuesWriter extends Do
Lucene40DocValuesFormat.BYTES_FIXED_DEREF_VERSION_CURRENT);
// deduplicate
- TreeSet<BytesRef> dictionary = new TreeSet<BytesRef>();
+ TreeSet<BytesRef> dictionary = new TreeSet<>();
for (BytesRef v : values) {
dictionary.add(v == null ? new BytesRef() : BytesRef.deepCopyOf(v));
}
@@ -354,7 +354,7 @@ class Lucene40DocValuesWriter extends Do
Lucene40DocValuesFormat.BYTES_VAR_DEREF_VERSION_CURRENT);
// deduplicate
- TreeSet<BytesRef> dictionary = new TreeSet<BytesRef>();
+ TreeSet<BytesRef> dictionary = new TreeSet<>();
for (BytesRef v : values) {
dictionary.add(v == null ? new BytesRef() : BytesRef.deepCopyOf(v));
}
@@ -362,7 +362,7 @@ class Lucene40DocValuesWriter extends Do
/* values */
long startPosition = data.getFilePointer();
long currentAddress = 0;
- HashMap<BytesRef,Long> valueToAddress = new HashMap<BytesRef,Long>();
+ HashMap<BytesRef,Long> valueToAddress = new HashMap<>();
for (BytesRef v : dictionary) {
currentAddress = data.getFilePointer() - startPosition;
valueToAddress.put(v, currentAddress);
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java Sun Mar 16 19:39:10 2014
@@ -144,7 +144,7 @@ class Lucene42DocValuesConsumer extends
} else {
meta.writeByte(TABLE_COMPRESSED); // table-compressed
Long[] decode = uniqueValues.toArray(new Long[uniqueValues.size()]);
- final HashMap<Long,Integer> encode = new HashMap<Long,Integer>();
+ final HashMap<Long,Integer> encode = new HashMap<>();
data.writeVInt(decode.length);
for (int i = 0; i < decode.length; i++) {
data.writeLong(decode[i]);
@@ -252,7 +252,7 @@ class Lucene42DocValuesConsumer extends
meta.writeByte(FST);
meta.writeLong(data.getFilePointer());
PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
- Builder<Long> builder = new Builder<Long>(INPUT_TYPE.BYTE1, outputs);
+ Builder<Long> builder = new Builder<>(INPUT_TYPE.BYTE1, outputs);
IntsRef scratch = new IntsRef();
long ord = 0;
for (BytesRef v : values) {
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java Sun Mar 16 19:39:10 2014
@@ -96,7 +96,7 @@ public final class MockRandomPostingsFor
// Chooses random IntStreamFactory depending on file's extension
private static class MockIntStreamFactory extends IntStreamFactory {
private final int salt;
- private final List<IntStreamFactory> delegates = new ArrayList<IntStreamFactory>();
+ private final List<IntStreamFactory> delegates = new ArrayList<>();
public MockIntStreamFactory(Random random) {
salt = random.nextInt();
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java Sun Mar 16 19:39:10 2014
@@ -65,7 +65,7 @@ public final class RAMOnlyPostingsFormat
// Postings state:
static class RAMPostings extends FieldsProducer {
- final Map<String,RAMField> fieldToTerms = new TreeMap<String,RAMField>();
+ final Map<String,RAMField> fieldToTerms = new TreeMap<>();
@Override
public Terms terms(String field) {
@@ -98,7 +98,7 @@ public final class RAMOnlyPostingsFormat
static class RAMField extends Terms {
final String field;
- final SortedMap<String,RAMTerm> termToDocs = new TreeMap<String,RAMTerm>();
+ final SortedMap<String,RAMTerm> termToDocs = new TreeMap<>();
long sumTotalTermFreq;
long sumDocFreq;
int docCount;
@@ -167,7 +167,7 @@ public final class RAMOnlyPostingsFormat
static class RAMTerm {
final String term;
long totalTermFreq;
- final List<RAMDoc> docs = new ArrayList<RAMDoc>();
+ final List<RAMDoc> docs = new ArrayList<>();
public RAMTerm(String term) {
this.term = term;
}
@@ -599,7 +599,7 @@ public final class RAMOnlyPostingsFormat
}
// Holds all indexes created, keyed by the ID assigned in fieldsConsumer
- private final Map<Integer,RAMPostings> state = new HashMap<Integer,RAMPostings>();
+ private final Map<Integer,RAMPostings> state = new HashMap<>();
private final AtomicInteger nextID = new AtomicInteger();
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java Sun Mar 16 19:39:10 2014
@@ -48,7 +48,7 @@ public abstract class BaseCompressingDoc
final IndexWriter iwriter = new IndexWriter(dir, iwc);
final int uniqueValueCount = TestUtil.nextInt(random(), 1, 256);
- final List<Long> values = new ArrayList<Long>();
+ final List<Long> values = new ArrayList<>();
final Document doc = new Document();
final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java Sun Mar 16 19:39:10 2014
@@ -1138,7 +1138,7 @@ public abstract class BaseDocValuesForma
RandomIndexWriter w = new RandomIndexWriter(random(), dir, cfg);
int numDocs = atLeast(100);
BytesRefHash hash = new BytesRefHash();
- Map<String, String> docToString = new HashMap<String, String>();
+ Map<String, String> docToString = new HashMap<>();
int maxLength = TestUtil.nextInt(random(), 1, 50);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
@@ -2086,7 +2086,7 @@ public abstract class BaseDocValuesForma
}
int numValues = TestUtil.nextInt(random(), 0, maxValuesPerDoc);
// create a random set of strings
- Set<String> values = new TreeSet<String>();
+ Set<String> values = new TreeSet<>();
for (int v = 0; v < numValues; v++) {
values.add(TestUtil.randomSimpleString(random(), length));
}
@@ -2097,7 +2097,7 @@ public abstract class BaseDocValuesForma
}
// add in any order to the dv field
- ArrayList<String> unordered = new ArrayList<String>(values);
+ ArrayList<String> unordered = new ArrayList<>(values);
Collections.shuffle(unordered, random());
for (String v : unordered) {
doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
@@ -2303,20 +2303,20 @@ public abstract class BaseDocValuesForma
}
int numValues = random().nextInt(17);
// create a random list of strings
- List<String> values = new ArrayList<String>();
+ List<String> values = new ArrayList<>();
for (int v = 0; v < numValues; v++) {
values.add(TestUtil.randomSimpleString(random(), length));
}
// add in any order to the indexed field
- ArrayList<String> unordered = new ArrayList<String>(values);
+ ArrayList<String> unordered = new ArrayList<>(values);
Collections.shuffle(unordered, random());
for (String v : values) {
doc.add(newStringField("indexed", v, Field.Store.NO));
}
// add in any order to the dv field
- ArrayList<String> unordered2 = new ArrayList<String>(values);
+ ArrayList<String> unordered2 = new ArrayList<>(values);
Collections.shuffle(unordered2, random());
for (String v : unordered2) {
doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
@@ -2628,7 +2628,7 @@ public abstract class BaseDocValuesForma
numDocs = TestUtil.nextInt(random(), 100, 200);
}
IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
- List<byte[]> docBytes = new ArrayList<byte[]>();
+ List<byte[]> docBytes = new ArrayList<>();
long totalBytes = 0;
for(int docID=0;docID<numDocs;docID++) {
// we don't use RandomIndexWriter because it might add
@@ -2726,7 +2726,7 @@ public abstract class BaseDocValuesForma
numDocs = TestUtil.nextInt(random(), 100, 200);
}
IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
- List<byte[]> docBytes = new ArrayList<byte[]>();
+ List<byte[]> docBytes = new ArrayList<>();
long totalBytes = 0;
for(int docID=0;docID<numDocs;docID++) {
// we don't use RandomIndexWriter because it might add
@@ -2905,7 +2905,7 @@ public abstract class BaseDocValuesForma
doc.add(dvNumericField);
}
int numSortedSetFields = random().nextInt(3);
- Set<String> values = new TreeSet<String>();
+ Set<String> values = new TreeSet<>();
for (int j = 0; j < numSortedSetFields; j++) {
values.add(TestUtil.randomSimpleString(random()));
}
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseMergePolicyTestCase.java Sun Mar 16 19:39:10 2014
@@ -17,15 +17,15 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
/**
* Base test case for {@link MergePolicy}.
*/
@@ -39,11 +39,11 @@ public abstract class BaseMergePolicyTes
final AtomicBoolean mayMerge = new AtomicBoolean(true);
final MergeScheduler mergeScheduler = new SerialMergeScheduler() {
@Override
- synchronized public void merge(IndexWriter writer) throws IOException {
+ synchronized public void merge(IndexWriter writer, MergeTrigger trigger, boolean newMergesFound) throws IOException {
if (!mayMerge.get() && writer.getNextMerge() != null) {
throw new AssertionError();
}
- super.merge(writer);
+ super.merge(writer, trigger, newMergesFound);
}
};
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergeScheduler(mergeScheduler).setMergePolicy(mergePolicy()));
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java Sun Mar 16 19:39:10 2014
@@ -341,7 +341,7 @@ public abstract class BasePostingsFormat
public static void createPostings() throws IOException {
totalPostings = 0;
totalPayloadBytes = 0;
- fields = new TreeMap<String,SortedMap<BytesRef,Long>>();
+ fields = new TreeMap<>();
final int numFields = TestUtil.nextInt(random(), 1, 5);
if (VERBOSE) {
@@ -362,9 +362,9 @@ public abstract class BasePostingsFormat
null, DocValuesType.NUMERIC, null);
fieldUpto++;
- SortedMap<BytesRef,Long> postings = new TreeMap<BytesRef,Long>();
+ SortedMap<BytesRef,Long> postings = new TreeMap<>();
fields.put(field, postings);
- Set<String> seenTerms = new HashSet<String>();
+ Set<String> seenTerms = new HashSet<>();
int numTerms;
if (random().nextInt(10) == 7) {
@@ -422,7 +422,7 @@ public abstract class BasePostingsFormat
}
}
- allTerms = new ArrayList<FieldAndTerm>();
+ allTerms = new ArrayList<>();
for(Map.Entry<String,SortedMap<BytesRef,Long>> fieldEnt : fields.entrySet()) {
String field = fieldEnt.getKey();
for(Map.Entry<BytesRef,Long> termEnt : fieldEnt.getValue().entrySet()) {
@@ -1103,8 +1103,8 @@ public abstract class BasePostingsFormat
ThreadState threadState = new ThreadState();
// Test random terms/fields:
- List<TermState> termStates = new ArrayList<TermState>();
- List<FieldAndTerm> termStateTerms = new ArrayList<FieldAndTerm>();
+ List<TermState> termStates = new ArrayList<>();
+ List<FieldAndTerm> termStateTerms = new ArrayList<>();
Collections.shuffle(allTerms, random());
int upto = 0;
@@ -1379,13 +1379,15 @@ public abstract class BasePostingsFormat
// during flush/merge
public void testInvertedWrite() throws Exception {
Directory dir = newDirectory();
- IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ MockAnalyzer analyzer = new MockAnalyzer(random());
+ analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
// Must be concurrent because thread(s) can be merging
// while up to one thread flushes, and each of those
// threads iterates over the map while the flushing
// thread might be adding to it:
- final Map<String,TermFreqs> termFreqs = new ConcurrentHashMap<String,TermFreqs>();
+ final Map<String,TermFreqs> termFreqs = new ConcurrentHashMap<>();
final AtomicLong sumDocFreq = new AtomicLong();
final AtomicLong sumTotalTermFreq = new AtomicLong();
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java Sun Mar 16 19:39:10 2014
@@ -99,7 +99,7 @@ public abstract class BaseStoredFieldsFo
final int docCount = atLeast(200);
final int fieldCount = TestUtil.nextInt(rand, 1, 5);
- final List<Integer> fieldIDs = new ArrayList<Integer>();
+ final List<Integer> fieldIDs = new ArrayList<>();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setTokenized(false);
@@ -109,7 +109,7 @@ public abstract class BaseStoredFieldsFo
fieldIDs.add(i);
}
- final Map<String,Document> docs = new HashMap<String,Document>();
+ final Map<String,Document> docs = new HashMap<>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
@@ -439,8 +439,8 @@ public abstract class BaseStoredFieldsFo
final IndexSearcher searcher = new IndexSearcher(rd);
final int concurrentReads = atLeast(5);
final int readsPerThread = atLeast(50);
- final List<Thread> readThreads = new ArrayList<Thread>();
- final AtomicReference<Exception> ex = new AtomicReference<Exception>();
+ final List<Thread> readThreads = new ArrayList<>();
+ final AtomicReference<Exception> ex = new AtomicReference<>();
for (int i = 0; i < concurrentReads; ++i) {
readThreads.add(new Thread() {
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java Sun Mar 16 19:39:10 2014
@@ -102,7 +102,7 @@ public abstract class BaseTermVectorsFor
}
protected Options randomOptions() {
- return RandomPicks.randomFrom(random(), new ArrayList<Options>(validOptions()));
+ return RandomPicks.randomFrom(random(), new ArrayList<>(validOptions()));
}
protected FieldType fieldType(Options options) {
@@ -245,8 +245,8 @@ public abstract class BaseTermVectorsFor
}
}
- positionToTerms = new HashMap<Integer, Set<Integer>>(len);
- startOffsetToTerms = new HashMap<Integer, Set<Integer>>(len);
+ positionToTerms = new HashMap<>(len);
+ startOffsetToTerms = new HashMap<>(len);
for (int i = 0; i < len; ++i) {
if (!positionToTerms.containsKey(positions[i])) {
positionToTerms.put(positions[i], new HashSet<Integer>(1));
@@ -258,7 +258,7 @@ public abstract class BaseTermVectorsFor
startOffsetToTerms.get(startOffsets[i]).add(i);
}
- freqs = new HashMap<String, Integer>();
+ freqs = new HashMap<>();
for (String term : terms) {
if (freqs.containsKey(term)) {
freqs.put(term, freqs.get(term) + 1);
@@ -314,7 +314,7 @@ public abstract class BaseTermVectorsFor
fieldTypes = new FieldType[fieldCount];
tokenStreams = new RandomTokenStream[fieldCount];
Arrays.fill(fieldTypes, fieldType(options));
- final Set<String> usedFileNames = new HashSet<String>();
+ final Set<String> usedFileNames = new HashSet<>();
for (int i = 0; i < fieldCount; ++i) {
do {
this.fieldNames[i] = RandomPicks.randomFrom(random(), fieldNames);
@@ -341,7 +341,7 @@ public abstract class BaseTermVectorsFor
private final BytesRef[] termBytes;
protected RandomDocumentFactory(int distinctFieldNames, int disctinctTerms) {
- final Set<String> fieldNames = new HashSet<String>();
+ final Set<String> fieldNames = new HashSet<>();
while (fieldNames.size() < distinctFieldNames) {
fieldNames.add(TestUtil.randomSimpleString(random()));
fieldNames.remove("id");
@@ -365,8 +365,8 @@ public abstract class BaseTermVectorsFor
// compare field names
assertEquals(doc == null, fields == null);
assertEquals(doc.fieldNames.length, fields.size());
- final Set<String> fields1 = new HashSet<String>();
- final Set<String> fields2 = new HashSet<String>();
+ final Set<String> fields1 = new HashSet<>();
+ final Set<String> fields2 = new HashSet<>();
for (int i = 0; i < doc.fieldNames.length; ++i) {
fields1.add(doc.fieldNames[i]);
}
@@ -389,19 +389,19 @@ public abstract class BaseTermVectorsFor
}
// to test reuse
- private final ThreadLocal<TermsEnum> termsEnum = new ThreadLocal<TermsEnum>();
- private final ThreadLocal<DocsEnum> docsEnum = new ThreadLocal<DocsEnum>();
- private final ThreadLocal<DocsAndPositionsEnum> docsAndPositionsEnum = new ThreadLocal<DocsAndPositionsEnum>();
+ private final ThreadLocal<TermsEnum> termsEnum = new ThreadLocal<>();
+ private final ThreadLocal<DocsEnum> docsEnum = new ThreadLocal<>();
+ private final ThreadLocal<DocsAndPositionsEnum> docsAndPositionsEnum = new ThreadLocal<>();
protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException {
assertEquals(1, terms.getDocCount());
- final int termCount = new HashSet<String>(Arrays.asList(tk.terms)).size();
+ final int termCount = new HashSet<>(Arrays.asList(tk.terms)).size();
assertEquals(termCount, terms.size());
assertEquals(termCount, terms.getSumDocFreq());
assertEquals(ft.storeTermVectorPositions(), terms.hasPositions());
assertEquals(ft.storeTermVectorOffsets(), terms.hasOffsets());
assertEquals(ft.storeTermVectorPayloads() && tk.hasPayloads(), terms.hasPayloads());
- final Set<BytesRef> uniqueTerms = new HashSet<BytesRef>();
+ final Set<BytesRef> uniqueTerms = new HashSet<>();
for (String term : tk.freqs.keySet()) {
uniqueTerms.add(new BytesRef(term));
}
@@ -638,7 +638,7 @@ public abstract class BaseTermVectorsFor
final RandomDocumentFactory docFactory = new RandomDocumentFactory(5, 20);
final int numDocs = atLeast(100);
final int numDeletes = random().nextInt(numDocs);
- final Set<Integer> deletes = new HashSet<Integer>();
+ final Set<Integer> deletes = new HashSet<>();
while (deletes.size() < numDeletes) {
deletes.add(random().nextInt(numDocs));
}
@@ -694,7 +694,7 @@ public abstract class BaseTermVectorsFor
assertEquals(docs[i], reader.getTermVectors(docID));
}
- final AtomicReference<Throwable> exception = new AtomicReference<Throwable>();
+ final AtomicReference<Throwable> exception = new AtomicReference<>();
final Thread[] threads = new Thread[2];
for (int i = 0; i < threads.length; ++i) {
threads[i] = new Thread() {
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java Sun Mar 16 19:39:10 2014
@@ -175,16 +175,16 @@ class DocHelper {
largeLazyField//placeholder for large field, since this is null. It must always be last
};
- public static Map<String,IndexableField> all =new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> indexed =new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> stored =new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> unstored=new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> unindexed=new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> termvector=new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> notermvector=new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> lazy= new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> noNorms=new HashMap<String,IndexableField>();
- public static Map<String,IndexableField> noTf=new HashMap<String,IndexableField>();
+ public static Map<String,IndexableField> all =new HashMap<>();
+ public static Map<String,IndexableField> indexed =new HashMap<>();
+ public static Map<String,IndexableField> stored =new HashMap<>();
+ public static Map<String,IndexableField> unstored=new HashMap<>();
+ public static Map<String,IndexableField> unindexed=new HashMap<>();
+ public static Map<String,IndexableField> termvector=new HashMap<>();
+ public static Map<String,IndexableField> notermvector=new HashMap<>();
+ public static Map<String,IndexableField> lazy= new HashMap<>();
+ public static Map<String,IndexableField> noNorms=new HashMap<>();
+ public static Map<String,IndexableField> noTf=new HashMap<>();
static {
//Initialize the large Lazy Field
@@ -227,7 +227,7 @@ class DocHelper {
static
{
- nameValues = new HashMap<String,Object>();
+ nameValues = new HashMap<>();
nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT);
nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT);
nameValues.put(TEXT_FIELD_3_KEY, FIELD_3_TEXT);
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java Sun Mar 16 19:39:10 2014
@@ -39,7 +39,7 @@ public final class FieldFilterAtomicRead
super(in);
this.fields = fields;
this.negate = negate;
- ArrayList<FieldInfo> filteredInfos = new ArrayList<FieldInfo>();
+ ArrayList<FieldInfo> filteredInfos = new ArrayList<>();
for (FieldInfo fi : in.getFieldInfos()) {
if (hasField(fi.name)) {
filteredInfos.add(fi);
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java Sun Mar 16 19:39:10 2014
@@ -46,7 +46,7 @@ public class MockRandomMergePolicy exten
int numSegments = segmentInfos.size();
- List<SegmentCommitInfo> segments = new ArrayList<SegmentCommitInfo>();
+ List<SegmentCommitInfo> segments = new ArrayList<>();
final Collection<SegmentCommitInfo> merging = writer.get().getMergingSegments();
for(SegmentCommitInfo sipc : segmentInfos) {
@@ -75,7 +75,7 @@ public class MockRandomMergePolicy exten
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge)
throws IOException {
- final List<SegmentCommitInfo> eligibleSegments = new ArrayList<SegmentCommitInfo>();
+ final List<SegmentCommitInfo> eligibleSegments = new ArrayList<>();
for(SegmentCommitInfo info : segmentInfos) {
if (segmentsToMerge.containsKey(info)) {
eligibleSegments.add(info);
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java Sun Mar 16 19:39:10 2014
@@ -68,16 +68,16 @@ import org.apache.lucene.util.TestUtil;
*/
public class RandomCodec extends Lucene46Codec {
/** Shuffled list of postings formats to use for new mappings */
- private List<PostingsFormat> formats = new ArrayList<PostingsFormat>();
+ private List<PostingsFormat> formats = new ArrayList<>();
/** Shuffled list of docvalues formats to use for new mappings */
- private List<DocValuesFormat> dvFormats = new ArrayList<DocValuesFormat>();
+ private List<DocValuesFormat> dvFormats = new ArrayList<>();
/** unique set of format names this codec knows about */
- public Set<String> formatNames = new HashSet<String>();
+ public Set<String> formatNames = new HashSet<>();
/** unique set of docvalues format names this codec knows about */
- public Set<String> dvFormatNames = new HashSet<String>();
+ public Set<String> dvFormatNames = new HashSet<>();
/** memorized field->postingsformat mappings */
// note: we have to sync this map even though its just for debugging/toString,
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java Sun Mar 16 19:39:10 2014
@@ -125,8 +125,8 @@ public abstract class ThreadedIndexingAn
@Override
public void run() {
// TODO: would be better if this were cross thread, so that we make sure one thread deleting anothers added docs works:
- final List<String> toDeleteIDs = new ArrayList<String>();
- final List<SubDocs> toDeleteSubDocs = new ArrayList<SubDocs>();
+ final List<String> toDeleteIDs = new ArrayList<>();
+ final List<SubDocs> toDeleteSubDocs = new ArrayList<>();
while(System.currentTimeMillis() < stopTime && !failed.get()) {
try {
@@ -180,9 +180,9 @@ public abstract class ThreadedIndexingAn
}
final Field packIDField = newStringField("packID", packID, Field.Store.YES);
- final List<String> docIDs = new ArrayList<String>();
+ final List<String> docIDs = new ArrayList<>();
final SubDocs subDocs = new SubDocs(packID, docIDs);
- final List<Document> docsList = new ArrayList<Document>();
+ final List<Document> docsList = new ArrayList<>();
allSubDocs.add(subDocs);
doc.add(packIDField);
@@ -440,8 +440,10 @@ public abstract class ThreadedIndexingAn
if (dir instanceof BaseDirectoryWrapper) {
((BaseDirectoryWrapper) dir).setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
}
+ MockAnalyzer analyzer = new MockAnalyzer(random());
+ analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
- new MockAnalyzer(random())).setInfoStream(new FailOnNonBulkMergesInfoStream());
+ analyzer).setInfoStream(new FailOnNonBulkMergesInfoStream());
if (LuceneTestCase.TEST_NIGHTLY) {
// newIWConfig makes smallish max seg size, which
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java Sun Mar 16 19:39:10 2014
@@ -90,6 +90,7 @@ public class AssertingIndexSearcher exte
@Override
protected void search(List<AtomicReaderContext> leaves, Weight weight, Collector collector) throws IOException {
+ // TODO: shouldn't we AssertingCollector.wrap(collector) here?
super.search(leaves, AssertingWeight.wrap(random, weight), collector);
}
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java Sun Mar 16 19:39:10 2014
@@ -26,37 +26,24 @@ import java.util.Random;
import java.util.WeakHashMap;
import org.apache.lucene.index.AssertingAtomicReader;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.util.VirtualMethod;
/** Wraps a Scorer with additional checks */
public class AssertingScorer extends Scorer {
- enum TopScorer {
- YES, NO, UNKNOWN;
- }
-
- private static final VirtualMethod<Scorer> SCORE_COLLECTOR = new VirtualMethod<Scorer>(Scorer.class, "score", Collector.class);
- private static final VirtualMethod<Scorer> SCORE_COLLECTOR_RANGE = new VirtualMethod<Scorer>(Scorer.class, "score", Collector.class, int.class, int.class);
-
// we need to track scorers using a weak hash map because otherwise we
// could loose references because of eg.
// AssertingScorer.score(Collector) which needs to delegate to work correctly
private static Map<Scorer, WeakReference<AssertingScorer>> ASSERTING_INSTANCES = Collections.synchronizedMap(new WeakHashMap<Scorer, WeakReference<AssertingScorer>>());
- private static Scorer wrap(Random random, Scorer other, TopScorer topScorer, boolean inOrder) {
+ public static Scorer wrap(Random random, Scorer other) {
if (other == null || other instanceof AssertingScorer) {
return other;
}
- final AssertingScorer assertScorer = new AssertingScorer(random, other, topScorer, inOrder);
- ASSERTING_INSTANCES.put(other, new WeakReference<AssertingScorer>(assertScorer));
+ final AssertingScorer assertScorer = new AssertingScorer(random, other);
+ ASSERTING_INSTANCES.put(other, new WeakReference<>(assertScorer));
return assertScorer;
}
- static Scorer wrap(Random random, Scorer other, boolean topScorer, boolean inOrder) {
- return wrap(random, other, topScorer ? TopScorer.YES : TopScorer.NO, inOrder);
- }
-
static Scorer getAssertingScorer(Random random, Scorer other) {
if (other == null || other instanceof AssertingScorer) {
return other;
@@ -68,7 +55,7 @@ public class AssertingScorer extends Sco
// scorer1.score(collector) calls
// collector.setScorer(scorer2) with scorer1 != scorer2, such as
// BooleanScorer. In that case we can't enable all assertions
- return new AssertingScorer(random, other, TopScorer.UNKNOWN, false);
+ return new AssertingScorer(random, other);
} else {
return assertingScorer;
}
@@ -77,20 +64,12 @@ public class AssertingScorer extends Sco
final Random random;
final Scorer in;
final AssertingAtomicReader.AssertingDocsEnum docsEnumIn;
- final TopScorer topScorer;
- final boolean inOrder;
- final boolean canCallNextDoc;
- private AssertingScorer(Random random, Scorer in, TopScorer topScorer, boolean inOrder) {
+ private AssertingScorer(Random random, Scorer in) {
super(in.weight);
this.random = random;
this.in = in;
- this.topScorer = topScorer;
- this.inOrder = inOrder;
- this.docsEnumIn = new AssertingAtomicReader.AssertingDocsEnum(in, topScorer == TopScorer.NO);
- this.canCallNextDoc = topScorer != TopScorer.YES // not a top scorer
- || !SCORE_COLLECTOR_RANGE.isOverriddenAsOf(in.getClass()) // the default impl relies upon nextDoc()
- || !SCORE_COLLECTOR.isOverriddenAsOf(in.getClass()); // the default impl relies upon nextDoc()
+ this.docsEnumIn = new AssertingAtomicReader.AssertingDocsEnum(in);
}
public Scorer getIn() {
@@ -117,39 +96,6 @@ public class AssertingScorer extends Sco
}
@Override
- public void score(Collector collector) throws IOException {
- assert topScorer != TopScorer.NO;
- if (SCORE_COLLECTOR.isOverriddenAsOf(this.in.getClass())) {
- if (random.nextBoolean()) {
- try {
- final boolean remaining = in.score(collector, DocsEnum.NO_MORE_DOCS, in.nextDoc());
- assert !remaining;
- } catch (UnsupportedOperationException e) {
- in.score(collector);
- }
- } else {
- in.score(collector);
- }
- } else {
- // score(Collector) has not been overridden, use the super method in
- // order to benefit from all assertions
- super.score(collector);
- }
- }
-
- @Override
- public boolean score(Collector collector, int max, int firstDocID) throws IOException {
- assert topScorer != TopScorer.NO;
- if (SCORE_COLLECTOR_RANGE.isOverriddenAsOf(this.in.getClass())) {
- return in.score(collector, max, firstDocID);
- } else {
- // score(Collector,int,int) has not been overridden, use the super
- // method in order to benefit from all assertions
- return super.score(collector, max, firstDocID);
- }
- }
-
- @Override
public Collection<ChildScorer> getChildren() {
// We cannot hide that we hold a single child, else
// collectors (e.g. ToParentBlockJoinCollector) that
@@ -171,13 +117,11 @@ public class AssertingScorer extends Sco
@Override
public int nextDoc() throws IOException {
- assert canCallNextDoc : "top scorers should not call nextDoc()";
return docsEnumIn.nextDoc();
}
@Override
public int advance(int target) throws IOException {
- assert canCallNextDoc : "top scorers should not call advance(target)";
return docsEnumIn.advance(target);
}
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java Sun Mar 16 19:39:10 2014
@@ -29,12 +29,14 @@ class AssertingWeight extends Weight {
return other instanceof AssertingWeight ? other : new AssertingWeight(random, other);
}
+ final boolean scoresDocsOutOfOrder;
final Random random;
final Weight in;
AssertingWeight(Random random, Weight in) {
this.random = random;
this.in = in;
+ scoresDocsOutOfOrder = in.scoresDocsOutOfOrder() || random.nextBoolean();
}
@Override
@@ -58,19 +60,46 @@ class AssertingWeight extends Weight {
}
@Override
- public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
- boolean topScorer, Bits acceptDocs) throws IOException {
+ public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
// if the caller asks for in-order scoring or if the weight does not support
// out-of order scoring then collection will have to happen in-order.
- final boolean inOrder = scoreDocsInOrder || !scoresDocsOutOfOrder();
- final Scorer inScorer = in.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
- return AssertingScorer.wrap(new Random(random.nextLong()), inScorer, topScorer, inOrder);
+ final Scorer inScorer = in.scorer(context, acceptDocs);
+ return AssertingScorer.wrap(new Random(random.nextLong()), inScorer);
}
@Override
- public boolean scoresDocsOutOfOrder() {
- return in.scoresDocsOutOfOrder();
+ public BulkScorer bulkScorer(AtomicReaderContext context, boolean scoreDocsInOrder, Bits acceptDocs) throws IOException {
+ // if the caller asks for in-order scoring or if the weight does not support
+ // out-of order scoring then collection will have to happen in-order.
+ BulkScorer inScorer = in.bulkScorer(context, scoreDocsInOrder, acceptDocs);
+ if (inScorer == null) {
+ return null;
+ }
+
+ if (AssertingBulkScorer.shouldWrap(inScorer)) {
+ // The incoming scorer already has a specialized
+ // implementation for BulkScorer, so we should use it:
+ return AssertingBulkScorer.wrap(new Random(random.nextLong()), inScorer);
+ } else if (scoreDocsInOrder == false && random.nextBoolean()) {
+ // The caller claims it can handle out-of-order
+ // docs; let's confirm that by pulling docs and
+ // randomly shuffling them before collection:
+ //Scorer scorer = in.scorer(context, acceptDocs);
+ Scorer scorer = scorer(context, acceptDocs);
+
+ // Scorer should not be null if bulkScorer wasn't:
+ assert scorer != null;
+ return new AssertingBulkOutOfOrderScorer(new Random(random.nextLong()), scorer);
+ } else {
+ // Let super wrap this.scorer instead, so we use
+ // AssertingScorer:
+ return super.bulkScorer(context, scoreDocsInOrder, acceptDocs);
+ }
}
+ @Override
+ public boolean scoresDocsOutOfOrder() {
+ return scoresDocsOutOfOrder;
+ }
}
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java Sun Mar 16 19:39:10 2014
@@ -59,7 +59,7 @@ public class CheckHits {
throws IOException {
String d = q.toString(defaultFieldName);
- Set<Integer> ignore = new TreeSet<Integer>();
+ Set<Integer> ignore = new TreeSet<>();
for (int i = 0; i < results.length; i++) {
ignore.add(Integer.valueOf(results[i]));
}
@@ -98,11 +98,11 @@ public class CheckHits {
QueryUtils.check(random,query,searcher);
- Set<Integer> correct = new TreeSet<Integer>();
+ Set<Integer> correct = new TreeSet<>();
for (int i = 0; i < results.length; i++) {
correct.add(Integer.valueOf(results[i]));
}
- final Set<Integer> actual = new TreeSet<Integer>();
+ final Set<Integer> actual = new TreeSet<>();
final Collector c = new SetCollector(actual);
searcher.search(query, c);
@@ -168,12 +168,12 @@ public class CheckHits {
ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
- Set<Integer> correct = new TreeSet<Integer>();
+ Set<Integer> correct = new TreeSet<>();
for (int i = 0; i < results.length; i++) {
correct.add(Integer.valueOf(results[i]));
}
- Set<Integer> actual = new TreeSet<Integer>();
+ Set<Integer> actual = new TreeSet<>();
for (int i = 0; i < hits.length; i++) {
actual.add(Integer.valueOf(hits[i].doc));
}
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java Sun Mar 16 19:39:10 2014
@@ -267,7 +267,7 @@ public class QueryUtils {
if (scorer == null) {
Weight w = s.createNormalizedWeight(q);
AtomicReaderContext context = readerContextArray.get(leafPtr);
- scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
+ scorer = w.scorer(context, context.reader().getLiveDocs());
}
int op = order[(opidx[0]++) % order.length];
@@ -314,7 +314,7 @@ public class QueryUtils {
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
AtomicReaderContext ctx = (AtomicReaderContext)indexSearcher.getTopReaderContext();
- Scorer scorer = w.scorer(ctx, true, false, ctx.reader().getLiveDocs());
+ Scorer scorer = w.scorer(ctx, ctx.reader().getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -329,7 +329,7 @@ public class QueryUtils {
@Override
public boolean acceptsDocsOutOfOrder() {
- return true;
+ return false;
}
});
@@ -341,7 +341,7 @@ public class QueryUtils {
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
AtomicReaderContext ctx = previousReader.getContext();
- Scorer scorer = w.scorer(ctx, true, false, ctx.reader().getLiveDocs());
+ Scorer scorer = w.scorer(ctx, ctx.reader().getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -372,7 +372,7 @@ public class QueryUtils {
long startMS = System.currentTimeMillis();
for (int i=lastDoc[0]+1; i<=doc; i++) {
Weight w = s.createNormalizedWeight(q);
- Scorer scorer = w.scorer(context.get(leafPtr), true, false, liveDocs);
+ Scorer scorer = w.scorer(context.get(leafPtr), liveDocs);
Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
float skipToScore = scorer.score();
@@ -400,7 +400,7 @@ public class QueryUtils {
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
- Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), true, false, previousReader.getLiveDocs());
+ Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), previousReader.getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -425,7 +425,7 @@ public class QueryUtils {
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
- Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), true, false, previousReader.getLiveDocs());
+ Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), previousReader.getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/RandomSimilarityProvider.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/RandomSimilarityProvider.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/RandomSimilarityProvider.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/RandomSimilarityProvider.java Sun Mar 16 19:39:10 2014
@@ -65,7 +65,7 @@ import org.apache.lucene.search.similari
public class RandomSimilarityProvider extends PerFieldSimilarityWrapper {
final DefaultSimilarity defaultSim = new DefaultSimilarity();
final List<Similarity> knownSims;
- Map<String,Similarity> previousMappings = new HashMap<String,Similarity>();
+ Map<String,Similarity> previousMappings = new HashMap<>();
final int perFieldSeed;
final int coordType; // 0 = no coord, 1 = coord, 2 = crazy coord
final boolean shouldQueryNorm;
@@ -74,7 +74,7 @@ public class RandomSimilarityProvider ex
perFieldSeed = random.nextInt();
coordType = random.nextInt(3);
shouldQueryNorm = random.nextBoolean();
- knownSims = new ArrayList<Similarity>(allSims);
+ knownSims = new ArrayList<>(allSims);
Collections.shuffle(knownSims, random);
}
@@ -138,7 +138,7 @@ public class RandomSimilarityProvider ex
};
static List<Similarity> allSims;
static {
- allSims = new ArrayList<Similarity>();
+ allSims = new ArrayList<>();
allSims.add(new DefaultSimilarity());
allSims.add(new BM25Similarity());
for (BasicModel basicModel : BASIC_MODELS) {
Modified: lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java?rev=1578144&r1=1578143&r2=1578144&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java (original)
+++ lucene/dev/branches/lucene5376_2/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java Sun Mar 16 19:39:10 2014
@@ -178,7 +178,7 @@ public abstract class ShardSearchingTest
// term stats from remote node
Map<Term,TermStatistics> getNodeTermStats(Set<Term> terms, int nodeID, long version) throws IOException {
final NodeState node = nodes[nodeID];
- final Map<Term,TermStatistics> stats = new HashMap<Term,TermStatistics>();
+ final Map<Term,TermStatistics> stats = new HashMap<>();
final IndexSearcher s = node.searchers.acquire(version);
if (s == null) {
throw new SearcherExpiredException("node=" + nodeID + " version=" + version);
@@ -207,8 +207,8 @@ public abstract class ShardSearchingTest
// local cache...? And still LRU otherwise (for the
// still-live searchers).
- private final Map<FieldAndShardVersion,CollectionStatistics> collectionStatsCache = new ConcurrentHashMap<FieldAndShardVersion,CollectionStatistics>();
- private final Map<TermAndShardVersion,TermStatistics> termStatsCache = new ConcurrentHashMap<TermAndShardVersion,TermStatistics>();
+ private final Map<FieldAndShardVersion,CollectionStatistics> collectionStatsCache = new ConcurrentHashMap<>();
+ private final Map<TermAndShardVersion,TermStatistics> termStatsCache = new ConcurrentHashMap<>();
/** Matches docs in the local shard but scores based on
* aggregated stats ("mock distributed scoring") from all
@@ -229,7 +229,7 @@ public abstract class ShardSearchingTest
@Override
public Query rewrite(Query original) throws IOException {
final Query rewritten = super.rewrite(original);
- final Set<Term> terms = new HashSet<Term>();
+ final Set<Term> terms = new HashSet<>();
rewritten.extractTerms(terms);
// Make a single request to remote nodes for term
@@ -239,7 +239,7 @@ public abstract class ShardSearchingTest
continue;
}
- final Set<Term> missing = new HashSet<Term>();
+ final Set<Term> missing = new HashSet<>();
for(Term term : terms) {
final TermAndShardVersion key = new TermAndShardVersion(nodeID, nodeVersions[nodeID], term);
if (!termStatsCache.containsKey(key)) {
@@ -449,7 +449,9 @@ public abstract class ShardSearchingTest
myNodeID = nodeID;
dir = newFSDirectory(TestUtil.getTempDir("ShardSearchingTestBase"));
// TODO: set warmer
- IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+ MockAnalyzer analyzer = new MockAnalyzer(random());
+ analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
+ IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
if (VERBOSE) {
iwc.setInfoStream(new PrintStreamInfoStream(System.out));