You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ma...@apache.org on 2011/11/26 02:03:19 UTC
svn commit: r1206387 [3/5] - in /lucene/dev/branches/solrcloud: ./
dev-tools/idea/.idea/ dev-tools/idea/lucene/contrib/
dev-tools/maven/lucene/contrib/demo/
dev-tools/maven/lucene/contrib/highlighter/
dev-tools/maven/lucene/contrib/memory/ dev-tools/ma...
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java Sat Nov 26 01:03:02 2011
@@ -26,7 +26,6 @@ import java.io.OutputStreamWriter;
import java.util.Properties;
import org.apache.commons.compress.compressors.CompressorStreamFactory;
-import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.benchmark.BenchmarkTestCase;
import org.apache.lucene.benchmark.byTask.PerfRunData;
@@ -43,6 +42,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.util.IOUtils;
/** Tests the functionality of {@link LineDocSource}. */
public class LineDocSourceTest extends BenchmarkTestCase {
@@ -113,47 +113,56 @@ public class LineDocSourceTest extends B
private void doIndexAndSearchTestWithRepeats(File file,
Class<? extends LineParser> lineParserClass, int numAdds, String storedField) throws Exception {
-
- Properties props = new Properties();
-
- // LineDocSource specific settings.
- props.setProperty("docs.file", file.getAbsolutePath());
- if (lineParserClass != null) {
- props.setProperty("line.parser", lineParserClass.getName());
- }
-
- // Indexing configuration.
- props.setProperty("analyzer", WhitespaceAnalyzer.class.getName());
- props.setProperty("content.source", LineDocSource.class.getName());
- props.setProperty("directory", "RAMDirectory");
- props.setProperty("doc.stored", "true");
- props.setProperty("doc.index.props", "true");
-
- // Create PerfRunData
- Config config = new Config(props);
- PerfRunData runData = new PerfRunData(config);
-
- TaskSequence tasks = new TaskSequence(runData, "testBzip2", null, false);
- tasks.addTask(new CreateIndexTask(runData));
- for (int i=0; i<numAdds; i++) {
- tasks.addTask(new AddDocTask(runData));
- }
- tasks.addTask(new CloseIndexTask(runData));
- tasks.doLogic();
- IndexReader reader = IndexReader.open(runData.getDirectory());
- IndexSearcher searcher = new IndexSearcher(reader);
- TopDocs td = searcher.search(new TermQuery(new Term("body", "body")), 10);
- assertEquals(numAdds, td.totalHits);
- assertNotNull(td.scoreDocs[0]);
-
- if (storedField==null) {
- storedField = DocMaker.BODY_FIELD; // added to all docs and satisfies field-name == value
+ IndexReader reader = null;
+ IndexSearcher searcher = null;
+ PerfRunData runData = null;
+ try {
+ Properties props = new Properties();
+
+ // LineDocSource specific settings.
+ props.setProperty("docs.file", file.getAbsolutePath());
+ if (lineParserClass != null) {
+ props.setProperty("line.parser", lineParserClass.getName());
+ }
+
+ // Indexing configuration.
+ props.setProperty("analyzer", WhitespaceAnalyzer.class.getName());
+ props.setProperty("content.source", LineDocSource.class.getName());
+ props.setProperty("directory", "RAMDirectory");
+ props.setProperty("doc.stored", "true");
+ props.setProperty("doc.index.props", "true");
+
+ // Create PerfRunData
+ Config config = new Config(props);
+ runData = new PerfRunData(config);
+
+ TaskSequence tasks = new TaskSequence(runData, "testBzip2", null, false);
+ tasks.addTask(new CreateIndexTask(runData));
+ for (int i=0; i<numAdds; i++) {
+ tasks.addTask(new AddDocTask(runData));
+ }
+ tasks.addTask(new CloseIndexTask(runData));
+ try {
+ tasks.doLogic();
+ } finally {
+ tasks.close();
+ }
+
+ reader = IndexReader.open(runData.getDirectory());
+ searcher = new IndexSearcher(reader);
+ TopDocs td = searcher.search(new TermQuery(new Term("body", "body")), 10);
+ assertEquals(numAdds, td.totalHits);
+ assertNotNull(td.scoreDocs[0]);
+
+ if (storedField==null) {
+ storedField = DocMaker.BODY_FIELD; // added to all docs and satisfies field-name == value
+ }
+ assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField));
+ } finally {
+ IOUtils.close(searcher, reader, runData);
}
- assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField));
- searcher.close();
- reader.close();
}
/* Tests LineDocSource with a bzip2 input stream. */
Modified: lucene/dev/branches/solrcloud/modules/facet/docs/userguide.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/docs/userguide.html?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/docs/userguide.html (original)
+++ lucene/dev/branches/solrcloud/modules/facet/docs/userguide.html Sat Nov 26 01:03:02 2011
@@ -18,10 +18,6 @@
<html>
<title>Facet Userguide</title>
-<!-- load stylesheet + javascript in development mode -->
-<link rel="stylesheet" type="text/css" href="../../../src/tools/prettify/prettify.css">
-<script src="../../../src/tools/prettify/prettify.js" type="text/javascript"></script>
-
<!-- load stylesheet + javascript in distribution mode -->
<link rel="stylesheet" type="text/css" href="prettify.css">
<script src="prettify.js" type="text/javascript"></script>
Modified: lucene/dev/branches/solrcloud/modules/grouping/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/CHANGES.txt?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/CHANGES.txt (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/CHANGES.txt Sat Nov 26 01:03:02 2011
@@ -14,3 +14,7 @@ API Changes
LUCENE-3483: Move Function grouping collectors from Solr to
grouping module. (Martijn van Groningen)
+
+New features
+
+LUCENE-3496: Support grouping by IndexDocValues. (Martijn van Groningen)
\ No newline at end of file
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html Sat Nov 26 01:03:02 2011
@@ -179,11 +179,44 @@ fields, <code>FieldCache</code>, etc.).
FixedBitSet groupHeadsBitSet = c.retrieveGroupHeads(maxDoc)
</pre>
-<p>For each of the above collectors there is also a variant that works with <code>ValueSource</code> instead of
+<p>For each of the above collector types there is also a variant that works with <code>ValueSource</code> instead
of fields. Concretely this means that these variants can work with functions. These variants are slower than
their term-based counterparts. These implementations are located in the
<code>org.apache.lucene.search.grouping.function</code> package.
</p>
+<p>
+ There are also IndexDocValues based implementations available for the group collectors. Factory methods
+ are available for creating IndexDocValues-based instances. A typical example using these grouping collectors:
+</p>
+
+<pre class="prettyprint">
+ boolean diskResident = true; // Whether values should be fetched directly from disk, bypassing the Java heap space.
+ AbstractFirstPassGroupingCollector c1 = DVFirstPassGroupingCollector.create(
+ groupSort, groupOffset+topNGroups, "author", ValueType.BYTES_VAR_SORTED, diskResident
+ );
+
+ s.search(new TermQuery(new Term("content", searchTerm)), c1);
+
+ Collection<SearchGroup<BytesRef>> topGroups = c1.getTopGroups(groupOffset, fillFields);
+
+ if (topGroups == null) {
+ // No groups matched
+ return;
+ }
+
+ boolean getScores = true;
+ boolean getMaxScores = true;
+ boolean fillFields = true;
+ AbstractSecondPassGroupingCollector<BytesRef> c2 = DVSecondPassGroupingCollector.create(
+ "author", diskResident, ValueType.BYTES_VAR_SORTED, topGroups, groupSort, docSort,
+ docOffset+docsPerGroup, getScores, getMaxScores, fillFields
+ );
+
+ s.search(new TermQuery(new Term("content", searchTerm)), c2);
+ TopGroups<BytesRef> groupsResult = c2.getTopGroups(docOffset);
+ // Render groupsResult...
+</pre>
+
</body>
</html>
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java Sat Nov 26 01:03:02 2011
@@ -130,7 +130,7 @@ public abstract class TermAllGroupHeadsC
GroupHead groupHead = groups.get(groupValue);
if (groupHead == null) {
groupHead = new GroupHead(groupValue, sortWithinGroup, doc);
- groups.put(groupValue == null ? null : new BytesRef(groupValue), groupHead);
+ groups.put(groupValue == null ? null : BytesRef.deepCopyOf(groupValue), groupHead);
temporalResult.stop = true;
} else {
temporalResult.stop = false;
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java Sat Nov 26 01:03:02 2011
@@ -71,10 +71,10 @@ public class TermFirstPassGroupingCollec
if (groupValue == null) {
return null;
} else if (reuse != null) {
- reuse.copy(groupValue);
+ reuse.copyBytes(groupValue);
return reuse;
} else {
- return new BytesRef(groupValue);
+ return BytesRef.deepCopyOf(groupValue);
}
}
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java Sat Nov 26 01:03:02 2011
@@ -18,17 +18,16 @@ package org.apache.lucene.search.groupin
*/
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.values.ValueType;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
import org.apache.lucene.search.*;
+import org.apache.lucene.search.grouping.dv.DVAllGroupHeadsCollector;
import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector;
import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector;
import org.apache.lucene.store.Directory;
@@ -42,6 +41,10 @@ import java.util.*;
public class AllGroupHeadsCollectorTest extends LuceneTestCase {
+ private static final ValueType[] vts = new ValueType[]{
+ ValueType.BYTES_VAR_DEREF, ValueType.BYTES_VAR_STRAIGHT, ValueType.BYTES_VAR_SORTED
+ };
+
public void testBasic() throws Exception {
final String groupField = "author";
Directory dir = newDirectory();
@@ -50,24 +53,26 @@ public class AllGroupHeadsCollectorTest
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+ boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName());
+ ValueType valueType = vts[random.nextInt(vts.length)];
// 0
Document doc = new Document();
- doc.add(newField(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV, valueType);
doc.add(newField("content", "random text", TextField.TYPE_STORED));
doc.add(newField("id", "1", StringField.TYPE_STORED));
w.addDocument(doc);
// 1
doc = new Document();
- doc.add(newField(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV, valueType);
doc.add(newField("content", "some more random text blob", TextField.TYPE_STORED));
doc.add(newField("id", "2", StringField.TYPE_STORED));
w.addDocument(doc);
// 2
doc = new Document();
- doc.add(newField(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV, valueType);
doc.add(newField("content", "some more random textual data", TextField.TYPE_STORED));
doc.add(newField("id", "3", StringField.TYPE_STORED));
w.addDocument(doc);
@@ -75,21 +80,21 @@ public class AllGroupHeadsCollectorTest
// 3
doc = new Document();
- doc.add(newField(groupField, "author2", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author2", canUseIDV, valueType);
doc.add(newField("content", "some random text", TextField.TYPE_STORED));
doc.add(newField("id", "4", StringField.TYPE_STORED));
w.addDocument(doc);
// 4
doc = new Document();
- doc.add(newField(groupField, "author3", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author3", canUseIDV, valueType);
doc.add(newField("content", "some more random text", TextField.TYPE_STORED));
doc.add(newField("id", "5", StringField.TYPE_STORED));
w.addDocument(doc);
// 5
doc = new Document();
- doc.add(newField(groupField, "author3", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author3", canUseIDV, valueType);
doc.add(newField("content", "random blob", TextField.TYPE_STORED));
doc.add(newField("id", "6", StringField.TYPE_STORED));
w.addDocument(doc);
@@ -108,34 +113,38 @@ public class AllGroupHeadsCollectorTest
IndexReader reader = w.getReader();
IndexSearcher indexSearcher = new IndexSearcher(reader);
+ if (SlowMultiReaderWrapper.class.isAssignableFrom(reader.getClass())) {
+ canUseIDV = false;
+ }
+
w.close();
int maxDoc = reader.maxDoc();
Sort sortWithinGroup = new Sort(new SortField("id", SortField.Type.INT, true));
- AbstractAllGroupHeadsCollector c1 = createRandomCollector(groupField, sortWithinGroup);
+ AbstractAllGroupHeadsCollector c1 = createRandomCollector(groupField, sortWithinGroup, canUseIDV, valueType);
indexSearcher.search(new TermQuery(new Term("content", "random")), c1);
assertTrue(arrayContains(new int[]{2, 3, 5, 7}, c1.retrieveGroupHeads()));
assertTrue(openBitSetContains(new int[]{2, 3, 5, 7}, c1.retrieveGroupHeads(maxDoc), maxDoc));
- AbstractAllGroupHeadsCollector c2 = createRandomCollector(groupField, sortWithinGroup);
+ AbstractAllGroupHeadsCollector c2 = createRandomCollector(groupField, sortWithinGroup, canUseIDV, valueType);
indexSearcher.search(new TermQuery(new Term("content", "some")), c2);
assertTrue(arrayContains(new int[]{2, 3, 4}, c2.retrieveGroupHeads()));
assertTrue(openBitSetContains(new int[]{2, 3, 4}, c2.retrieveGroupHeads(maxDoc), maxDoc));
- AbstractAllGroupHeadsCollector c3 = createRandomCollector(groupField, sortWithinGroup);
+ AbstractAllGroupHeadsCollector c3 = createRandomCollector(groupField, sortWithinGroup, canUseIDV, valueType);
indexSearcher.search(new TermQuery(new Term("content", "blob")), c3);
assertTrue(arrayContains(new int[]{1, 5}, c3.retrieveGroupHeads()));
assertTrue(openBitSetContains(new int[]{1, 5}, c3.retrieveGroupHeads(maxDoc), maxDoc));
// STRING sort type triggers different implementation
Sort sortWithinGroup2 = new Sort(new SortField("id", SortField.Type.STRING, true));
- AbstractAllGroupHeadsCollector c4 = createRandomCollector(groupField, sortWithinGroup2);
+ AbstractAllGroupHeadsCollector c4 = createRandomCollector(groupField, sortWithinGroup2, canUseIDV, valueType);
indexSearcher.search(new TermQuery(new Term("content", "random")), c4);
assertTrue(arrayContains(new int[]{2, 3, 5, 7}, c4.retrieveGroupHeads()));
assertTrue(openBitSetContains(new int[]{2, 3, 5, 7}, c4.retrieveGroupHeads(maxDoc), maxDoc));
Sort sortWithinGroup3 = new Sort(new SortField("id", SortField.Type.STRING, false));
- AbstractAllGroupHeadsCollector c5 = createRandomCollector(groupField, sortWithinGroup3);
+ AbstractAllGroupHeadsCollector c5 = createRandomCollector(groupField, sortWithinGroup3, canUseIDV, valueType);
indexSearcher.search(new TermQuery(new Term("content", "random")), c5);
// 7 b/c higher doc id wins, even if order of field is in not in reverse.
assertTrue(arrayContains(new int[]{0, 3, 4, 6}, c5.retrieveGroupHeads()));
@@ -161,7 +170,13 @@ public class AllGroupHeadsCollectorTest
final List<BytesRef> groups = new ArrayList<BytesRef>();
for (int i = 0; i < numGroups; i++) {
- groups.add(new BytesRef(_TestUtil.randomRealisticUnicodeString(random)));
+ String randomValue;
+ do {
+ // B/c of DV based impl we can't see the difference between an empty string and a null value.
+ // For that reason we don't generate empty string groups.
+ randomValue = _TestUtil.randomRealisticUnicodeString(random);
+ } while ("".equals(randomValue));
+ groups.add(new BytesRef(randomValue));
}
final String[] contentStrings = new String[_TestUtil.nextInt(random, 2, 20)];
if (VERBOSE) {
@@ -186,11 +201,19 @@ public class AllGroupHeadsCollectorTest
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)));
+ boolean preFlex = "Lucene3x".equals(w.w.getConfig().getCodec().getName());
+ boolean canUseIDV = !preFlex;
+ ValueType valueType = vts[random.nextInt(vts.length)];
Document doc = new Document();
Document docNoGroup = new Document();
Field group = newField("group", "", StringField.TYPE_UNSTORED);
doc.add(group);
+ IndexDocValuesField valuesField = null;
+ if (canUseIDV) {
+ valuesField = new IndexDocValuesField("group");
+ doc.add(valuesField);
+ }
Field sort1 = newField("sort1", "", StringField.TYPE_UNSTORED);
doc.add(sort1);
docNoGroup.add(sort1);
@@ -233,6 +256,9 @@ public class AllGroupHeadsCollectorTest
groupDocs[i] = groupDoc;
if (groupDoc.group != null) {
group.setValue(groupDoc.group.utf8ToString());
+ if (canUseIDV) {
+ valuesField.setBytes(new BytesRef(groupDoc.group.utf8ToString()), valueType);
+ }
}
sort1.setValue(groupDoc.sort1.utf8ToString());
sort2.setValue(groupDoc.sort2.utf8ToString());
@@ -259,6 +285,11 @@ public class AllGroupHeadsCollectorTest
try {
final IndexSearcher s = newSearcher(r);
+ if (SlowMultiReaderWrapper.class.isAssignableFrom(s.getIndexReader().getClass())) {
+ canUseIDV = false;
+ } else {
+ canUseIDV = !preFlex;
+ }
for (int contentID = 0; contentID < 3; contentID++) {
final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real" + contentID)), numDocs).scoreDocs;
@@ -284,7 +315,7 @@ public class AllGroupHeadsCollectorTest
final String searchTerm = "real" + random.nextInt(3);
boolean sortByScoreOnly = random.nextBoolean();
Sort sortWithinGroup = getRandomSort(sortByScoreOnly);
- AbstractAllGroupHeadsCollector allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup);
+ AbstractAllGroupHeadsCollector allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup, canUseIDV, valueType);
s.search(new TermQuery(new Term("content", searchTerm)), allGroupHeadsCollector);
int[] expectedGroupHeads = createExpectedGroupHeads(searchTerm, groupDocs, sortWithinGroup, sortByScoreOnly, fieldIdToDocID);
int[] actualGroupHeads = allGroupHeadsCollector.retrieveGroupHeads();
@@ -475,15 +506,33 @@ public class AllGroupHeadsCollectorTest
};
}
- private AbstractAllGroupHeadsCollector createRandomCollector(String groupField, Sort sortWithinGroup) throws IOException {
+ private AbstractAllGroupHeadsCollector createRandomCollector(String groupField, Sort sortWithinGroup, boolean canUseIDV, ValueType valueType) throws IOException {
+ AbstractAllGroupHeadsCollector collector;
if (random.nextBoolean()) {
ValueSource vs = new BytesRefFieldSource(groupField);
- return new FunctionAllGroupHeadsCollector(vs, new HashMap(), sortWithinGroup);
+ collector = new FunctionAllGroupHeadsCollector(vs, new HashMap(), sortWithinGroup);
+ } else if (canUseIDV && random.nextBoolean()) {
+ boolean diskResident = random.nextBoolean();
+ collector = DVAllGroupHeadsCollector.create(groupField, sortWithinGroup, valueType, diskResident);
} else {
- return TermAllGroupHeadsCollector.create(groupField, sortWithinGroup);
+ collector = TermAllGroupHeadsCollector.create(groupField, sortWithinGroup);
+ }
+
+ if (VERBOSE) {
+ System.out.println("Selected implementation: " + collector.getClass().getSimpleName());
}
+
+ return collector;
}
+ private void addGroupField(Document doc, String groupField, String value, boolean canUseIDV, ValueType valueType) {
+ doc.add(new Field(groupField, value, TextField.TYPE_STORED));
+ if (canUseIDV) {
+ IndexDocValuesField valuesField = new IndexDocValuesField(groupField);
+ valuesField.setBytes(new BytesRef(value), valueType);
+ doc.add(valuesField);
+ }
+ }
private static class GroupDoc {
final int id;
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java Sat Nov 26 01:03:02 2011
@@ -18,19 +18,19 @@ package org.apache.lucene.search.groupin
*/
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.*;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.values.ValueType;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
+import org.apache.lucene.search.grouping.dv.DVAllGroupsCollector;
import org.apache.lucene.search.grouping.term.TermAllGroupsCollector;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
@@ -46,27 +46,29 @@ public class AllGroupsCollectorTest exte
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(
- random,
- dir,
- newIndexWriterConfig(TEST_VERSION_CURRENT,
- new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+ random,
+ dir,
+ newIndexWriterConfig(TEST_VERSION_CURRENT,
+ new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+ boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName());
+
// 0
Document doc = new Document();
- doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV);
doc.add(new Field("content", "random text", TextField.TYPE_STORED));
doc.add(new Field("id", "1", customType));
w.addDocument(doc);
// 1
doc = new Document();
- doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV);
doc.add(new Field("content", "some more random text blob", TextField.TYPE_STORED));
doc.add(new Field("id", "2", customType));
w.addDocument(doc);
// 2
doc = new Document();
- doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV);
doc.add(new Field("content", "some more random textual data", TextField.TYPE_STORED));
doc.add(new Field("id", "3", customType));
w.addDocument(doc);
@@ -74,21 +76,21 @@ public class AllGroupsCollectorTest exte
// 3
doc = new Document();
- doc.add(new Field(groupField, "author2", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author2", canUseIDV);
doc.add(new Field("content", "some random text", TextField.TYPE_STORED));
doc.add(new Field("id", "4", customType));
w.addDocument(doc);
// 4
doc = new Document();
- doc.add(new Field(groupField, "author3", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author3", canUseIDV);
doc.add(new Field("content", "some more random text", TextField.TYPE_STORED));
doc.add(new Field("id", "5", customType));
w.addDocument(doc);
// 5
doc = new Document();
- doc.add(new Field(groupField, "author3", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author3", canUseIDV);
doc.add(new Field("content", "random blob", TextField.TYPE_STORED));
doc.add(new Field("id", "6", customType));
w.addDocument(doc);
@@ -102,15 +104,15 @@ public class AllGroupsCollectorTest exte
IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
w.close();
- AbstractAllGroupsCollector c1 = createRandomCollector(groupField);
+ AbstractAllGroupsCollector c1 = createRandomCollector(groupField, canUseIDV);
indexSearcher.search(new TermQuery(new Term("content", "random")), c1);
assertEquals(4, c1.getGroupCount());
- AbstractAllGroupsCollector c2 = createRandomCollector(groupField);
+ AbstractAllGroupsCollector c2 = createRandomCollector(groupField, canUseIDV);
indexSearcher.search(new TermQuery(new Term("content", "some")), c2);
assertEquals(3, c2.getGroupCount());
- AbstractAllGroupsCollector c3 = createRandomCollector(groupField);
+ AbstractAllGroupsCollector c3 = createRandomCollector(groupField, canUseIDV);
indexSearcher.search(new TermQuery(new Term("content", "blob")), c3);
assertEquals(2, c3.getGroupCount());
@@ -118,13 +120,32 @@ public class AllGroupsCollectorTest exte
dir.close();
}
- private AbstractAllGroupsCollector createRandomCollector(String groupField) throws IOException {
- if (random.nextBoolean()) {
- return new TermAllGroupsCollector(groupField);
+ private void addGroupField(Document doc, String groupField, String value, boolean canUseIDV) {
+ doc.add(new Field(groupField, value, TextField.TYPE_STORED));
+ if (canUseIDV) {
+ IndexDocValuesField valuesField = new IndexDocValuesField(groupField);
+ valuesField.setBytes(new BytesRef(value), ValueType.BYTES_VAR_SORTED);
+ doc.add(valuesField);
+ }
+ }
+
+ private AbstractAllGroupsCollector createRandomCollector(String groupField, boolean canUseIDV) throws IOException {
+ AbstractAllGroupsCollector selected;
+ if (random.nextBoolean() && canUseIDV) {
+ boolean diskResident = random.nextBoolean();
+ selected = DVAllGroupsCollector.create(groupField, ValueType.BYTES_VAR_SORTED, diskResident);
+ } else if (random.nextBoolean()) {
+ selected = new TermAllGroupsCollector(groupField);
} else {
ValueSource vs = new BytesRefFieldSource(groupField);
- return new FunctionAllGroupsCollector(vs, new HashMap());
+ selected = new FunctionAllGroupsCollector(vs, new HashMap());
}
+
+ if (VERBOSE) {
+ System.out.println("Selected implementation: " + selected.getClass().getName());
+ }
+
+ return selected;
}
}
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java Sat Nov 26 01:03:02 2011
@@ -17,23 +17,20 @@
package org.apache.lucene.search.grouping;
-import java.io.IOException;
-import java.util.*;
-
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.*;
import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.values.ValueType;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
import org.apache.lucene.search.*;
+import org.apache.lucene.search.grouping.dv.DVAllGroupsCollector;
+import org.apache.lucene.search.grouping.dv.DVFirstPassGroupingCollector;
+import org.apache.lucene.search.grouping.dv.DVSecondPassGroupingCollector;
import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector;
@@ -48,6 +45,9 @@ import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueStr;
+import java.io.IOException;
+import java.util.*;
+
// TODO
// - should test relevance sort too
// - test null
@@ -62,51 +62,52 @@ public class TestGrouping extends Lucene
FieldType customType = new FieldType();
customType.setStored(true);
-
+
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(
random,
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+ boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName());
// 0
Document doc = new Document();
- doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV);
doc.add(new Field("content", "random text", TextField.TYPE_STORED));
doc.add(new Field("id", "1", customType));
w.addDocument(doc);
// 1
doc = new Document();
- doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV);
doc.add(new Field("content", "some more random text", TextField.TYPE_STORED));
doc.add(new Field("id", "2", customType));
w.addDocument(doc);
// 2
doc = new Document();
- doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author1", canUseIDV);
doc.add(new Field("content", "some more random textual data", TextField.TYPE_STORED));
doc.add(new Field("id", "3", customType));
w.addDocument(doc);
// 3
doc = new Document();
- doc.add(new Field(groupField, "author2", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author2", canUseIDV);
doc.add(new Field("content", "some random text", TextField.TYPE_STORED));
doc.add(new Field("id", "4", customType));
w.addDocument(doc);
// 4
doc = new Document();
- doc.add(new Field(groupField, "author3", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author3", canUseIDV);
doc.add(new Field("content", "some more random text", TextField.TYPE_STORED));
doc.add(new Field("id", "5", customType));
w.addDocument(doc);
// 5
doc = new Document();
- doc.add(new Field(groupField, "author3", TextField.TYPE_STORED));
+ addGroupField(doc, groupField, "author3", canUseIDV);
doc.add(new Field("content", "random", TextField.TYPE_STORED));
doc.add(new Field("id", "6", customType));
w.addDocument(doc);
@@ -121,7 +122,7 @@ public class TestGrouping extends Lucene
w.close();
final Sort groupSort = Sort.RELEVANCE;
- final AbstractFirstPassGroupingCollector c1 = createRandomFirstPassCollector(groupField, groupSort, 10);
+ final AbstractFirstPassGroupingCollector c1 = createRandomFirstPassCollector(groupField, groupSort, 10, canUseIDV);
indexSearcher.search(new TermQuery(new Term("content", "random")), c1);
final AbstractSecondPassGroupingCollector c2 = createSecondPassCollector(c1, groupField, groupSort, null, 0, 5, true, false, true);
@@ -167,8 +168,37 @@ public class TestGrouping extends Lucene
dir.close();
}
- private AbstractFirstPassGroupingCollector createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs) throws IOException {
- if (random.nextBoolean()) {
+ private void addGroupField(Document doc, String groupField, String value, boolean canUseIDV) {
+ doc.add(new Field(groupField, value, TextField.TYPE_STORED));
+ if (canUseIDV) {
+ IndexDocValuesField valuesField = new IndexDocValuesField(groupField);
+ valuesField.setBytes(new BytesRef(value), ValueType.BYTES_VAR_SORTED);
+ doc.add(valuesField);
+ }
+ }
+
+ private AbstractFirstPassGroupingCollector createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs, boolean canUseIDV) throws IOException {
+ AbstractFirstPassGroupingCollector selected;
+ if (canUseIDV && random.nextBoolean()) {
+ boolean diskResident = random.nextBoolean();
+ selected = DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField, ValueType.BYTES_VAR_SORTED, diskResident);
+ } else if (random.nextBoolean()) {
+ ValueSource vs = new BytesRefFieldSource(groupField);
+ selected = new FunctionFirstPassGroupingCollector(vs, new HashMap(), groupSort, topDocs);
+ } else {
+ selected = new TermFirstPassGroupingCollector(groupField, groupSort, topDocs);
+ }
+ if (VERBOSE) {
+ System.out.println("Selected implementation: " + selected.getClass().getName());
+ }
+ return selected;
+ }
+
+ private AbstractFirstPassGroupingCollector createFirstPassCollector(String groupField, Sort groupSort, int topDocs, AbstractFirstPassGroupingCollector firstPassGroupingCollector) throws IOException {
+ if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
+ boolean diskResident = random.nextBoolean();
+ return DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField, ValueType.BYTES_VAR_SORTED, diskResident);
+ } else if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
ValueSource vs = new BytesRefFieldSource(groupField);
return new FunctionFirstPassGroupingCollector(vs, new HashMap(), groupSort, topDocs);
} else {
@@ -176,6 +206,7 @@ public class TestGrouping extends Lucene
}
}
+ @SuppressWarnings("unchecked")
private AbstractSecondPassGroupingCollector createSecondPassCollector(AbstractFirstPassGroupingCollector firstPassGroupingCollector,
String groupField,
Sort groupSort,
@@ -186,19 +217,22 @@ public class TestGrouping extends Lucene
boolean getMaxScores,
boolean fillSortFields) throws IOException {
- if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) {
- @SuppressWarnings("unchecked")
+ if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
+ boolean diskResident = random.nextBoolean();
+ Collection<SearchGroup> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields);
+ return DVSecondPassGroupingCollector.create(groupField, diskResident, ValueType.BYTES_VAR_SORTED, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
+ } else if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
Collection<SearchGroup<BytesRef>> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields);
return new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields);
} else {
ValueSource vs = new BytesRefFieldSource(groupField);
- @SuppressWarnings("unchecked")
Collection<SearchGroup<MutableValue>> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields);
return new FunctionSecondPassGroupingCollector(searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields, vs, new HashMap());
}
}
// Basically converts searchGroups from MutableValue to BytesRef if grouping by ValueSource
+ @SuppressWarnings("unchecked")
private AbstractSecondPassGroupingCollector createSecondPassCollector(AbstractFirstPassGroupingCollector firstPassGroupingCollector,
String groupField,
Collection<SearchGroup<BytesRef>> searchGroups,
@@ -208,8 +242,10 @@ public class TestGrouping extends Lucene
boolean getScores,
boolean getMaxScores,
boolean fillSortFields) throws IOException {
-
- if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) {
+ if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
+ boolean diskResident = random.nextBoolean();
+ return DVSecondPassGroupingCollector.create(groupField, diskResident, ValueType.BYTES_VAR_SORTED, (Collection) searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
+ } else if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) {
return new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields);
} else {
ValueSource vs = new BytesRefFieldSource(groupField);
@@ -232,9 +268,13 @@ public class TestGrouping extends Lucene
}
}
- private AbstractAllGroupsCollector createAllGroupsCollector(AbstractFirstPassGroupingCollector firstPassGroupingCollector, String groupField) {
+ private AbstractAllGroupsCollector createAllGroupsCollector(AbstractFirstPassGroupingCollector firstPassGroupingCollector,
+ String groupField) {
if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) {
return new TermAllGroupsCollector(groupField);
+ } else if (firstPassGroupingCollector.getClass().isAssignableFrom(DVFirstPassGroupingCollector.class)) {
+ boolean diskResident = random.nextBoolean();
+ return DVAllGroupsCollector.create(groupField, ValueType.BYTES_VAR_SORTED, diskResident);
} else {
ValueSource vs = new BytesRefFieldSource(groupField);
return new FunctionAllGroupsCollector(vs, new HashMap());
@@ -247,6 +287,8 @@ public class TestGrouping extends Lucene
return;
} else if (group.groupValue.getClass().isAssignableFrom(MutableValueStr.class)) {
return;
+ } else if (((BytesRef) group.groupValue).length == 0) {
+ return;
}
fail();
}
@@ -263,9 +305,9 @@ public class TestGrouping extends Lucene
}
private Collection<SearchGroup<BytesRef>> getSearchGroups(AbstractFirstPassGroupingCollector c, int groupOffset, boolean fillFields) {
- if (c.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) {
+ if (TermFirstPassGroupingCollector.class.isAssignableFrom(c.getClass())) {
return ((TermFirstPassGroupingCollector) c).getTopGroups(groupOffset, fillFields);
- } else if (c.getClass().isAssignableFrom(FunctionFirstPassGroupingCollector.class)) {
+ } else if (FunctionFirstPassGroupingCollector.class.isAssignableFrom(c.getClass())) {
Collection<SearchGroup<MutableValue>> mutableValueGroups = ((FunctionFirstPassGroupingCollector) c).getTopGroups(groupOffset, fillFields);
if (mutableValueGroups == null) {
return null;
@@ -279,6 +321,10 @@ public class TestGrouping extends Lucene
groups.add(sg);
}
return groups;
+ } else if (DVFirstPassGroupingCollector.class.isAssignableFrom(c.getClass())) {
+ @SuppressWarnings("unchecked")
+ Collection<SearchGroup<BytesRef>> topGroups = ((DVFirstPassGroupingCollector<BytesRef>) c).getTopGroups(groupOffset, fillFields);
+ return topGroups;
}
fail();
return null;
@@ -296,6 +342,8 @@ public class TestGrouping extends Lucene
groups.add(new GroupDocs<BytesRef>(mvalGd.maxScore, mvalGd.totalHits, mvalGd.scoreDocs, groupValue, mvalGd.groupSortValues));
}
return new TopGroups<BytesRef>(mvalTopGroups.groupSort, mvalTopGroups.withinGroupSort, mvalTopGroups.totalHitCount, mvalTopGroups.totalGroupedHitCount, groups.toArray(new GroupDocs[groups.size()]));
+ } else if (DVSecondPassGroupingCollector.class.isAssignableFrom(c.getClass())) {
+ return ((DVSecondPassGroupingCollector<BytesRef>) c).getTopGroups(withinGroupOffset);
}
fail();
return null;
@@ -512,7 +560,7 @@ public class TestGrouping extends Lucene
Collections.shuffle(Arrays.asList(groupDocs), random);
final Map<BytesRef,List<GroupDoc>> groupMap = new HashMap<BytesRef,List<GroupDoc>>();
final List<BytesRef> groupValues = new ArrayList<BytesRef>();
-
+
for(GroupDoc groupDoc : groupDocs) {
if (!groupMap.containsKey(groupDoc.group)) {
groupValues.add(groupDoc.group);
@@ -561,8 +609,7 @@ public class TestGrouping extends Lucene
for(List<Document> docs : updateDocs) {
// Just replaces docs w/ same docs:
- w.updateDocuments(new Term("group", docs.get(0).get("group")),
- docs);
+ w.updateDocuments(new Term("group", docs.get(0).get("group")), docs);
}
final IndexReader r = w.getReader();
@@ -588,7 +635,7 @@ public class TestGrouping extends Lucene
subSearchers[0] = new ShardSearcher((IndexReader.AtomicReaderContext) ctx, ctx);
} else {
final IndexReader.CompositeReaderContext compCTX = (IndexReader.CompositeReaderContext) ctx;
- for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
+ for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
subSearchers[searcherIDX] = new ShardSearcher(compCTX.leaves[searcherIDX], compCTX);
}
}
@@ -602,10 +649,10 @@ public class TestGrouping extends Lucene
}
}
}
-
- public void testRandom() throws Exception {
- for(int iter=0;iter<3;iter++) {
+ public void testRandom() throws Exception {
+ int numberOfRuns = _TestUtil.nextInt(random, 3, 6);
+ for (int iter=0; iter<numberOfRuns; iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + iter);
}
@@ -621,8 +668,15 @@ public class TestGrouping extends Lucene
final List<BytesRef> groups = new ArrayList<BytesRef>();
for(int i=0;i<numGroups;i++) {
- groups.add(new BytesRef(_TestUtil.randomRealisticUnicodeString(random)));
- //groups.add(new BytesRef(_TestUtil.randomSimpleString(random)));
+ String randomValue;
+ do {
+ // Because of the DV-based implementation we can't tell the difference between an empty string and a null value.
+ // For that reason we don't generate empty-string groups.
+// randomValue = _TestUtil.randomRealisticUnicodeString(random);
+ randomValue = _TestUtil.randomSimpleString(random);
+ } while ("".equals(randomValue));
+
+ groups.add(new BytesRef(randomValue));
}
final String[] contentStrings = new String[_TestUtil.nextInt(random, 2, 20)];
if (VERBOSE) {
@@ -630,7 +684,7 @@ public class TestGrouping extends Lucene
}
for(int contentIDX=0;contentIDX<contentStrings.length;contentIDX++) {
final StringBuilder sb = new StringBuilder();
- sb.append("real" + random.nextInt(3)).append(' ');
+ sb.append("real").append(random.nextInt(3)).append(' ');
final int fakeCount = random.nextInt(10);
for(int fakeIDX=0;fakeIDX<fakeCount;fakeIDX++) {
sb.append("fake ");
@@ -647,9 +701,16 @@ public class TestGrouping extends Lucene
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)));
+ final boolean preFlex = "Lucene3x".equals(w.w.getConfig().getCodec().getName());
+ boolean canUseIDV = !preFlex;
Document doc = new Document();
Document docNoGroup = new Document();
+ IndexDocValuesField idvGroupField = new IndexDocValuesField("group");
+ if (canUseIDV) {
+ doc.add(idvGroupField);
+ }
+
Field group = newField("group", "", StringField.TYPE_UNSTORED);
doc.add(group);
Field sort1 = newField("sort1", "", StringField.TYPE_UNSTORED);
@@ -686,6 +747,9 @@ public class TestGrouping extends Lucene
groupDocs[i] = groupDoc;
if (groupDoc.group != null) {
group.setValue(groupDoc.group.utf8ToString());
+ if (canUseIDV) {
+ idvGroupField.setBytes(BytesRef.deepCopyOf(groupDoc.group), ValueType.BYTES_VAR_SORTED);
+ }
}
sort1.setValue(groupDoc.sort1.utf8ToString());
sort2.setValue(groupDoc.sort2.utf8ToString());
@@ -711,6 +775,11 @@ public class TestGrouping extends Lucene
try {
final IndexSearcher s = newSearcher(r);
+ if (SlowMultiReaderWrapper.class.isAssignableFrom(s.getIndexReader().getClass())) {
+ canUseIDV = false;
+ } else {
+ canUseIDV = !preFlex;
+ }
final ShardState shards = new ShardState(s);
for(int contentID=0;contentID<3;contentID++) {
@@ -806,7 +875,7 @@ public class TestGrouping extends Lucene
System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " dF=" + r.docFreq("content", new BytesRef(searchTerm)) +" dFBlock=" + rBlocks.docFreq("content", new BytesRef(searchTerm)) + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups + " getScores=" + getScores + " getMaxScores=" + getMaxScores);
}
- final AbstractFirstPassGroupingCollector c1 = createRandomFirstPassCollector("group", groupSort, groupOffset+topNGroups);
+ final AbstractFirstPassGroupingCollector c1 = createRandomFirstPassCollector("group", groupSort, groupOffset+topNGroups, canUseIDV);
final CachingCollector cCache;
final Collector c;
@@ -818,7 +887,7 @@ public class TestGrouping extends Lucene
}
final boolean useWrappingCollector = random.nextBoolean();
-
+
if (doCache) {
final double maxCacheMB = random.nextDouble();
if (VERBOSE) {
@@ -827,10 +896,10 @@ public class TestGrouping extends Lucene
if (useWrappingCollector) {
if (doAllGroups) {
- cCache = CachingCollector.create(c1, true, maxCacheMB);
+ cCache = CachingCollector.create(c1, true, maxCacheMB);
c = MultiCollector.wrap(cCache, allGroupsCollector);
} else {
- c = cCache = CachingCollector.create(c1, true, maxCacheMB);
+ c = cCache = CachingCollector.create(c1, true, maxCacheMB);
}
} else {
// Collect only into cache, then replay multiple times:
@@ -844,7 +913,7 @@ public class TestGrouping extends Lucene
c = c1;
}
}
-
+
// Search top reader:
final Query query = new TermQuery(new Term("content", searchTerm));
s.search(query, c);
@@ -879,9 +948,12 @@ public class TestGrouping extends Lucene
}
}
}
-
+
// Get 1st pass top groups using shards
- final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores);
+
+ ValueHolder<Boolean> idvBasedImplsUsedSharded = new ValueHolder<Boolean>(false);
+ final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort,
+ groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, canUseIDV, preFlex, idvBasedImplsUsedSharded);
final AbstractSecondPassGroupingCollector c2;
if (topGroups != null) {
@@ -948,19 +1020,32 @@ public class TestGrouping extends Lucene
System.out.println(" id=" + docIDToID[sd.doc] + " score=" + sd.score);
}
}
-
+
if (searchIter == 14) {
for(int docIDX=0;docIDX<s.getIndexReader().maxDoc();docIDX++) {
System.out.println("ID=" + docIDToID[docIDX] + " explain=" + s.explain(query, docIDX));
}
}
}
+
+ if (topGroupsShards == null) {
+ System.out.println("TEST: no matched-merged groups");
+ } else {
+ System.out.println("TEST: matched-merged groups totalGroupedHitCount=" + topGroupsShards.totalGroupedHitCount);
+ for(GroupDocs<BytesRef> gd : topGroupsShards.groups) {
+ System.out.println(" group=" + (gd.groupValue == null ? "null" : gd.groupValue) + " totalHits=" + gd.totalHits);
+ for(ScoreDoc sd : gd.scoreDocs) {
+ System.out.println(" id=" + docIDToID[sd.doc] + " score=" + sd.score);
+ }
+ }
+ }
}
- assertEquals(docIDToID, expectedGroups, groupsResult, true, true, true, getScores);
+ boolean idvBasedImplsUsed = DVFirstPassGroupingCollector.class.isAssignableFrom(c1.getClass());
+ assertEquals(docIDToID, expectedGroups, groupsResult, true, true, true, getScores, idvBasedImplsUsed);
- // Confirm merged shards match:
- assertEquals(docIDToID, expectedGroups, topGroupsShards, true, false, fillFields, getScores);
+ // Confirm merged shards match:
+ assertEquals(docIDToID, expectedGroups, topGroupsShards, true, false, fillFields, getScores, idvBasedImplsUsedSharded.value);
if (topGroupsShards != null) {
verifyShards(shards.docStarts, topGroupsShards);
}
@@ -1008,7 +1093,8 @@ public class TestGrouping extends Lucene
}
// Get shard'd block grouping result:
- final TopGroups<BytesRef> topGroupsBlockShards = searchShards(sBlocks, shardsBlocks.subSearchers, query, groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores);
+ final TopGroups<BytesRef> topGroupsBlockShards = searchShards(sBlocks, shardsBlocks.subSearchers, query,
+ groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, false, true, new ValueHolder<Boolean>(false));
if (expectedGroups != null) {
// Fixup scores for reader2
@@ -1051,8 +1137,8 @@ public class TestGrouping extends Lucene
}
}
- assertEquals(docIDToIDBlocks, expectedGroups, groupsResultBlocks, false, true, true, getScores);
- assertEquals(docIDToIDBlocks, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores);
+ assertEquals(docIDToIDBlocks, expectedGroups, groupsResultBlocks, false, true, true, getScores, false);
+ assertEquals(docIDToIDBlocks, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores, false);
}
s.close();
sBlocks.close();
@@ -1082,29 +1168,10 @@ public class TestGrouping extends Lucene
}
}
- private void assertEquals(Collection<SearchGroup<BytesRef>> groups1, Collection<SearchGroup<BytesRef>> groups2, boolean doSortValues) {
- assertEquals(groups1.size(), groups2.size());
- final Iterator<SearchGroup<BytesRef>> iter1 = groups1.iterator();
- final Iterator<SearchGroup<BytesRef>> iter2 = groups2.iterator();
-
- while(iter1.hasNext()) {
- assertTrue(iter2.hasNext());
-
- SearchGroup<BytesRef> group1 = iter1.next();
- SearchGroup<BytesRef> group2 = iter2.next();
-
- assertEquals(group1.groupValue, group2.groupValue);
- if (doSortValues) {
- assertArrayEquals(group1.sortValues, group2.sortValues);
- }
- }
- assertFalse(iter2.hasNext());
- }
-
private TopGroups<BytesRef> searchShards(IndexSearcher topSearcher, ShardSearcher[] subSearchers, Query query, Sort groupSort, Sort docSort, int groupOffset, int topNGroups, int docOffset,
- int topNDocs, boolean getScores, boolean getMaxScores) throws Exception {
+ int topNDocs, boolean getScores, boolean getMaxScores, boolean canUseIDV, boolean preFlex, ValueHolder<Boolean> usedIdvBasedImpl) throws Exception {
- // TODO: swap in caching, all groups collector here
+ // TODO: swap in caching, all groups collector here
// too...
if (VERBOSE) {
System.out.println("TEST: " + subSearchers.length + " shards: " + Arrays.toString(subSearchers));
@@ -1113,11 +1180,25 @@ public class TestGrouping extends Lucene
final Weight w = topSearcher.createNormalizedWeight(query);
final List<Collection<SearchGroup<BytesRef>>> shardGroups = new ArrayList<Collection<SearchGroup<BytesRef>>>();
List<AbstractFirstPassGroupingCollector> firstPassGroupingCollectors = new ArrayList<AbstractFirstPassGroupingCollector>();
+ AbstractFirstPassGroupingCollector firstPassCollector = null;
for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
- final AbstractFirstPassGroupingCollector c = createRandomFirstPassCollector("group", groupSort, groupOffset+topNGroups);
- firstPassGroupingCollectors.add(c);
- subSearchers[shardIDX].search(w, c);
- final Collection<SearchGroup<BytesRef>> topGroups = getSearchGroups(c, 0, true);
+ if (SlowMultiReaderWrapper.class.isAssignableFrom(subSearchers[shardIDX].getIndexReader().getClass())) {
+ canUseIDV = false;
+ } else {
+ canUseIDV = !preFlex;
+ }
+
+ if (firstPassCollector == null) {
+ firstPassCollector = createRandomFirstPassCollector("group", groupSort, groupOffset + topNGroups, canUseIDV);
+ if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassCollector.getClass())) {
+ usedIdvBasedImpl.value = true;
+ }
+ } else {
+ firstPassCollector = createFirstPassCollector("group", groupSort, groupOffset + topNGroups, firstPassCollector);
+ }
+ firstPassGroupingCollectors.add(firstPassCollector);
+ subSearchers[shardIDX].search(w, firstPassCollector);
+ final Collection<SearchGroup<BytesRef>> topGroups = getSearchGroups(firstPassCollector, 0, true);
if (topGroups != null) {
if (VERBOSE) {
System.out.println(" shard " + shardIDX + " s=" + subSearchers[shardIDX] + " " + topGroups.size() + " groups:");
@@ -1131,56 +1212,78 @@ public class TestGrouping extends Lucene
final Collection<SearchGroup<BytesRef>> mergedTopGroups = SearchGroup.merge(shardGroups, groupOffset, topNGroups, groupSort);
if (VERBOSE) {
- System.out.println(" merged:");
+ System.out.println(" top groups merged:");
if (mergedTopGroups == null) {
System.out.println(" null");
} else {
+ System.out.println(" " + mergedTopGroups.size() + " top groups:");
for(SearchGroup<BytesRef> group : mergedTopGroups) {
- System.out.println(" " + groupToString(group.groupValue) + " groupSort=" + Arrays.toString(group.sortValues));
+ System.out.println(" [" + groupToString(group.groupValue) + "] groupSort=" + Arrays.toString(group.sortValues));
}
}
}
if (mergedTopGroups != null) {
-
// Now 2nd pass:
@SuppressWarnings("unchecked")
- final TopGroups<BytesRef>[] shardTopGroups = new TopGroups[subSearchers.length];
+ final TopGroups<BytesRef>[] shardTopGroups = new TopGroups[subSearchers.length];
for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
- final AbstractSecondPassGroupingCollector c = createSecondPassCollector(firstPassGroupingCollectors.get(shardIDX),
+ final AbstractSecondPassGroupingCollector secondPassCollector = createSecondPassCollector(firstPassGroupingCollectors.get(shardIDX),
"group", mergedTopGroups, groupSort, docSort, docOffset + topNDocs, getScores, getMaxScores, true);
- subSearchers[shardIDX].search(w, c);
- shardTopGroups[shardIDX] = getTopGroups(c, 0);
+ subSearchers[shardIDX].search(w, secondPassCollector);
+ shardTopGroups[shardIDX] = getTopGroups(secondPassCollector, 0);
+ if (VERBOSE) {
+ System.out.println(" " + shardTopGroups[shardIDX].groups.length + " shard[" + shardIDX + "] groups:");
+ for(GroupDocs<BytesRef> group : shardTopGroups[shardIDX].groups) {
+ System.out.println(" [" + groupToString(group.groupValue) + "] groupSort=" + Arrays.toString(group.groupSortValues) + " numDocs=" + group.scoreDocs.length);
+ }
+ }
}
- return TopGroups.merge(shardTopGroups, groupSort, docSort, docOffset, topNDocs);
+ TopGroups<BytesRef> mergedGroups = TopGroups.merge(shardTopGroups, groupSort, docSort, docOffset, topNDocs);
+ if (VERBOSE) {
+ System.out.println(" " + mergedGroups.groups.length + " merged groups:");
+ for(GroupDocs<BytesRef> group : mergedGroups.groups) {
+ System.out.println(" [" + groupToString(group.groupValue) + "] groupSort=" + Arrays.toString(group.groupSortValues) + " numDocs=" + group.scoreDocs.length);
+ }
+ }
+ return mergedGroups;
} else {
return null;
}
}
- private void assertEquals(int[] docIDtoID, TopGroups expected, TopGroups actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores) {
+ private void assertEquals(int[] docIDtoID, TopGroups<BytesRef> expected, TopGroups<BytesRef> actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores, boolean idvBasedImplsUsed) {
if (expected == null) {
assertNull(actual);
return;
}
assertNotNull(actual);
- assertEquals(expected.groups.length, actual.groups.length);
- assertEquals(expected.totalHitCount, actual.totalHitCount);
- assertEquals(expected.totalGroupedHitCount, actual.totalGroupedHitCount);
+ assertEquals("expected.groups.length != actual.groups.length", expected.groups.length, actual.groups.length);
+ assertEquals("expected.totalHitCount != actual.totalHitCount", expected.totalHitCount, actual.totalHitCount);
+ assertEquals("expected.totalGroupedHitCount != actual.totalGroupedHitCount", expected.totalGroupedHitCount, actual.totalGroupedHitCount);
if (expected.totalGroupCount != null && verifyTotalGroupCount) {
- assertEquals(expected.totalGroupCount, actual.totalGroupCount);
+ assertEquals("expected.totalGroupCount != actual.totalGroupCount", expected.totalGroupCount, actual.totalGroupCount);
}
for(int groupIDX=0;groupIDX<expected.groups.length;groupIDX++) {
if (VERBOSE) {
System.out.println(" check groupIDX=" + groupIDX);
}
- final GroupDocs expectedGroup = expected.groups[groupIDX];
- final GroupDocs actualGroup = actual.groups[groupIDX];
+ final GroupDocs<BytesRef> expectedGroup = expected.groups[groupIDX];
+ final GroupDocs<BytesRef> actualGroup = actual.groups[groupIDX];
if (verifyGroupValues) {
- assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
+ if (idvBasedImplsUsed) {
+ if (actualGroup.groupValue.length == 0) {
+ assertNull(expectedGroup.groupValue);
+ } else {
+ assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
+ }
+ } else {
+ assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
+ }
+
}
if (verifySortValues) {
assertArrayEquals(expectedGroup.groupSortValues, actualGroup.groupSortValues);
@@ -1233,4 +1336,13 @@ public class TestGrouping extends Lucene
return "ShardSearcher(" + ctx[0].reader + ")";
}
}
+
+ private static class ValueHolder<V> {
+
+ V value;
+
+ private ValueHolder(V value) {
+ this.value = value;
+ }
+ }
}
Modified: lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java (original)
+++ lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java Sat Nov 26 01:03:02 2011
@@ -79,7 +79,7 @@ public class TermsFilter extends Filter
}
if (terms != null) { // TODO this check doesn't make sense, decide which variable its supposed to be for
- br.copy(term.bytes());
+ br.copyBytes(term.bytes());
if (termsEnum.seekCeil(br) == TermsEnum.SeekStatus.FOUND) {
docs = termsEnum.docs(acceptDocs, docs);
while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
Modified: lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/DocValues.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/DocValues.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/DocValues.java (original)
+++ lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/DocValues.java Sat Nov 26 01:03:02 2011
@@ -59,7 +59,7 @@ public abstract class DocValues {
target.length = 0;
return false;
}
- target.copy(s);
+ target.copyChars(s);
return true;
};
Modified: lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/docvalues/StringIndexDocValues.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/docvalues/StringIndexDocValues.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/docvalues/StringIndexDocValues.java (original)
+++ lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/docvalues/StringIndexDocValues.java Sat Nov 26 01:03:02 2011
@@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexRead
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueStr;
@@ -77,7 +78,7 @@ public abstract class StringIndexDocValu
int ord=termsIndex.getOrd(doc);
if (ord==0) return null;
termsIndex.lookup(ord, spare);
- spare.utf8ToChars(spareChars);
+ UnicodeUtil.UTF8toUTF16(spare, spareChars);
return spareChars.toString();
}
Modified: lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/LiteralValueSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/LiteralValueSource.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/LiteralValueSource.java (original)
+++ lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/LiteralValueSource.java Sat Nov 26 01:03:02 2011
@@ -55,7 +55,7 @@ public class LiteralValueSource extends
@Override
public boolean bytesVal(int doc, BytesRef target) {
- target.copy(bytesRef);
+ target.copyBytes(bytesRef);
return true;
}
Modified: lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java (original)
+++ lucene/dev/branches/solrcloud/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java Sat Nov 26 01:03:02 2011
@@ -34,6 +34,7 @@ import org.apache.lucene.search.similari
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.UnicodeUtil;
/**
@@ -740,7 +741,8 @@ public final class MoreLikeThis {
final CharsRef spare = new CharsRef();
BytesRef text;
while((text = termsEnum.next()) != null) {
- final String term = text.utf8ToChars(spare).toString();
+ UnicodeUtil.UTF8toUTF16(text, spare);
+ final String term = spare.toString();
if (isNoiseWord(term)) {
continue;
}
Modified: lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java (original)
+++ lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java Sat Nov 26 01:03:02 2011
@@ -290,7 +290,6 @@ public abstract class QueryParserBase {
this.lowercaseExpandedTerms = lowercaseExpandedTerms;
}
-
/**
* @see #setLowercaseExpandedTerms(boolean)
*/
@@ -540,7 +539,7 @@ public abstract class QueryParserBase {
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
}
- return newTermQuery(new Term(field, new BytesRef(bytes)));
+ return newTermQuery(new Term(field, BytesRef.deepCopyOf(bytes)));
} else {
if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries)) {
if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries)) {
@@ -559,7 +558,7 @@ public abstract class QueryParserBase {
// safe to ignore, because we know the number of tokens
}
Query currentQuery = newTermQuery(
- new Term(field, new BytesRef(bytes)));
+ new Term(field, BytesRef.deepCopyOf(bytes)));
q.add(currentQuery, occur);
}
return q;
@@ -592,7 +591,7 @@ public abstract class QueryParserBase {
multiTerms.clear();
}
position += positionIncrement;
- multiTerms.add(new Term(field, new BytesRef(bytes)));
+ multiTerms.add(new Term(field, BytesRef.deepCopyOf(bytes)));
}
if (enablePositionIncrements) {
mpq.add(multiTerms.toArray(new Term[0]),position);
@@ -623,9 +622,9 @@ public abstract class QueryParserBase {
if (enablePositionIncrements) {
position += positionIncrement;
- pq.add(new Term(field, new BytesRef(bytes)),position);
+ pq.add(new Term(field, BytesRef.deepCopyOf(bytes)),position);
} else {
- pq.add(new Term(field, new BytesRef(bytes)));
+ pq.add(new Term(field, BytesRef.deepCopyOf(bytes)));
}
}
return pq;
@@ -778,14 +777,21 @@ public abstract class QueryParserBase {
return new FuzzyQuery(term,minimumSimilarity,prefixLength);
}
- private BytesRef analyzeRangePart(String field, String part) {
+ // TODO: Should this be protected instead?
+ private BytesRef analyzeMultitermTerm(String field, String part) {
+ return analyzeMultitermTerm(field, part, analyzer);
+ }
+
+ protected BytesRef analyzeMultitermTerm(String field, String part, Analyzer analyzerIn) {
TokenStream source;
-
+
+ if (analyzerIn == null) analyzerIn = analyzer;
+
try {
- source = analyzer.tokenStream(field, new StringReader(part));
+ source = analyzerIn.tokenStream(field, new StringReader(part));
source.reset();
} catch (IOException e) {
- throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e);
+ throw new RuntimeException("Unable to initialize TokenStream to analyze multiTerm term: " + part, e);
}
TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
@@ -793,10 +799,10 @@ public abstract class QueryParserBase {
try {
if (!source.incrementToken())
- throw new IllegalArgumentException("analyzer returned no terms for range part: " + part);
+ throw new IllegalArgumentException("analyzer returned no terms for multiTerm term: " + part);
termAtt.fillBytesRef();
if (source.incrementToken())
- throw new IllegalArgumentException("analyzer returned too many terms for range part: " + part);
+ throw new IllegalArgumentException("analyzer returned too many terms for multiTerm term: " + part);
} catch (IOException e) {
throw new RuntimeException("error analyzing range part: " + part, e);
}
@@ -805,10 +811,10 @@ public abstract class QueryParserBase {
source.end();
source.close();
} catch (IOException e) {
- throw new RuntimeException("Unable to end & close TokenStream after analyzing range part: " + part, e);
+ throw new RuntimeException("Unable to end & close TokenStream after analyzing multiTerm term: " + part, e);
}
- return new BytesRef(bytes);
+ return BytesRef.deepCopyOf(bytes);
}
/**
@@ -827,13 +833,13 @@ public abstract class QueryParserBase {
if (part1 == null) {
start = null;
} else {
- start = analyzeRangeTerms ? analyzeRangePart(field, part1) : new BytesRef(part1);
+ start = analyzeRangeTerms ? analyzeMultitermTerm(field, part1) : new BytesRef(part1);
}
if (part2 == null) {
end = null;
} else {
- end = analyzeRangeTerms ? analyzeRangePart(field, part2) : new BytesRef(part2);
+ end = analyzeRangeTerms ? analyzeMultitermTerm(field, part2) : new BytesRef(part2);
}
final TermRangeQuery query = new TermRangeQuery(field, start, end, startInclusive, endInclusive);
Modified: lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java (original)
+++ lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java Sat Nov 26 01:03:02 2011
@@ -59,7 +59,7 @@ public class SpanOrTermsBuilder extends
ts.reset();
while (ts.incrementToken()) {
termAtt.fillBytesRef();
- SpanTermQuery stq = new SpanTermQuery(new Term(fieldName, new BytesRef(bytes)));
+ SpanTermQuery stq = new SpanTermQuery(new Term(fieldName, BytesRef.deepCopyOf(bytes)));
clausesList.add(stq);
}
ts.end();
Modified: lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java (original)
+++ lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java Sat Nov 26 01:03:02 2011
@@ -64,7 +64,7 @@ public class TermsFilterBuilder implemen
ts.reset();
while (ts.incrementToken()) {
termAtt.fillBytesRef();
- term = new Term(fieldName, new BytesRef(bytes));
+ term = new Term(fieldName, BytesRef.deepCopyOf(bytes));
tf.addTerm(term);
}
ts.end();
Modified: lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java (original)
+++ lucene/dev/branches/solrcloud/modules/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java Sat Nov 26 01:03:02 2011
@@ -59,7 +59,7 @@ public class TermsQueryBuilder implement
ts.reset();
while (ts.incrementToken()) {
termAtt.fillBytesRef();
- term = new Term(fieldName, new BytesRef(bytes));
+ term = new Term(fieldName, BytesRef.deepCopyOf(bytes));
bq.add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD));
}
ts.end();
Modified: lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java (original)
+++ lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java Sat Nov 26 01:03:02 2011
@@ -36,6 +36,7 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
/**
@@ -371,7 +372,11 @@ public class DirectSpellChecker {
int index = suggestions.length - 1;
for (ScoreTerm s : terms) {
SuggestWord suggestion = new SuggestWord();
- suggestion.string = s.termAsString != null ? s.termAsString : s.term.utf8ToChars(spare).toString();
+ if (s.termAsString == null) {
+ UnicodeUtil.UTF8toUTF16(s.term, spare);
+ s.termAsString = spare.toString();
+ }
+ suggestion.string = s.termAsString;
suggestion.score = s.score;
suggestion.freq = s.docfreq;
suggestions[index--] = suggestion;
@@ -428,7 +433,8 @@ public class DirectSpellChecker {
// undo FuzzyTermsEnum's scale factor for a real scaled lev score
score = boost / e.getScaleFactor() + e.getMinSimilarity();
} else {
- termAsString = candidateTerm.utf8ToChars(spare).toString();
+ UnicodeUtil.UTF8toUTF16(candidateTerm, spare);
+ termAsString = spare.toString();
score = distance.getDistance(term.text(), termAsString);
}
@@ -436,7 +442,7 @@ public class DirectSpellChecker {
continue;
// add new entry in PQ
- st.term = new BytesRef(candidateTerm);
+ st.term = BytesRef.deepCopyOf(candidateTerm);
st.boost = boost;
st.docfreq = df;
st.termAsString = termAsString;
Modified: lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java (original)
+++ lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java Sat Nov 26 01:03:02 2011
@@ -26,6 +26,7 @@ import org.apache.lucene.index.Terms;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
/**
* HighFrequencyDictionary: terms taken from the given field
@@ -89,7 +90,12 @@ public class HighFrequencyDictionary imp
}
hasNextCalled = false;
- return (actualTerm != null) ? actualTerm.utf8ToChars(spare).toString() : null;
+ if (actualTerm == null) {
+ return null;
+ } else {
+ UnicodeUtil.UTF8toUTF16(actualTerm, spare);
+ return spare.toString();
+ }
}
public boolean hasNext() {
Modified: lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java (original)
+++ lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java Sat Nov 26 01:03:02 2011
@@ -24,6 +24,7 @@ import java.util.Iterator;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.MultiFields;
@@ -75,7 +76,7 @@ public class LuceneDictionary implements
return null;
}
- final String result = pendingTerm.utf8ToChars(spare).toString();
+ UnicodeUtil.UTF8toUTF16(pendingTerm, spare);
try {
pendingTerm = termsEnum.next();
@@ -83,7 +84,7 @@ public class LuceneDictionary implements
throw new RuntimeException(e);
}
- return result;
+ return spare.toString();
}
public boolean hasNext() {
Modified: lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (original)
+++ lucene/dev/branches/solrcloud/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java Sat Nov 26 01:03:02 2011
@@ -521,7 +521,7 @@ public class SpellChecker implements jav
if (!isEmpty) {
// we have a non-empty index, check if the term exists
- currentTerm.copy(word);
+ currentTerm.copyChars(word);
for (TermsEnum te : termsEnums) {
if (te.seekExact(currentTerm, false)) {
continue terms;
Modified: lucene/dev/branches/solrcloud/solr/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/solr/CHANGES.txt?rev=1206387&r1=1206386&r2=1206387&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/solr/CHANGES.txt (original)
+++ lucene/dev/branches/solrcloud/solr/CHANGES.txt Sat Nov 26 01:03:02 2011
@@ -188,6 +188,11 @@ New Features
* SOLR-2134 Trie* fields should support sortMissingLast=true, and deprecate Sortable* Field Types
(Ryan McKinley, Mike McCandless, Uwe Schindler, Erick Erickson)
+
+* SOLR-2438: Case insensitive search for wildcard queries. Actually, the ability to specify
+ a complete analysis chain for multiterm queries.
+ (Pete Sturge, Erick Erickson, Mentoring from Seeley and Muir)
+
Optimizations
----------------------
@@ -360,11 +365,37 @@ Other Changes
* SOLR-2862: More explicit lexical resources location logged if Carrot2 clustering
extension is used. Fixed solr. impl. of IResource and IResourceLookup. (Dawid Weiss)
+* SOLR-1123: Changed JSONResponseWriter to now use application/json as its Content-Type
+ by default. However the Content-Type can be overwritten and is set to text/plain in
+ the example configuration. (Uri Boness, Chris Male)
+
+* SOLR-2607: Removed deprecated client/ruby directory, which included solr-ruby and flare.
+ (ehatcher)
+
Documentation
----------------------
* SOLR-2232: Improved README info on solr.solr.home in examples
(Eric Pugh and hossman)
+
+================== 3.6.0 ==================
+
+New Features
+----------------------
+* SOLR-2904: BinaryUpdateRequestHandler should be able to accept multiple update requests from
+ a stream (shalin)
+
+* SOLR-1565: StreamingUpdateSolrServer supports RequestWriter API and therefore, javabin update
+ format (shalin)
+
+* SOLR-2438: Case insensitive search for wildcard queries. Actually, the ability to specify
+ a complete analysis chain for multiterm queries.
+ (Pete Sturge, Erick Erickson, Mentoring from Seeley and Muir)
+
+
+Bug Fixes
+----------------------
+* SOLR-2912: Fixed File descriptor leak in ShowFileRequestHandler (Michael Ryan, shalin)
================== 3.5.0 ==================
@@ -411,7 +442,7 @@ Optimizations
Bug Fixes
----------------------
* SOLR-2748: The CommitTracker used for commitWith or autoCommit by maxTime
- could commit too frequently and could block adds until a new seaercher was
+ could commit too frequently and could block adds until a new searcher was
registered. (yonik)
* SOLR-2726: Fixed NullPointerException when using spellcheck.q with Suggester.