You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by yo...@apache.org on 2011/11/14 23:36:32 UTC
svn commit: r1201946 [6/14] - in /lucene/dev/branches/solrcloud: ./
dev-tools/idea/lucene/contrib/ lucene/ lucene/contrib/
lucene/contrib/demo/src/java/org/apache/lucene/demo/
lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/
luce...
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java Mon Nov 14 22:36:20 2011
@@ -134,7 +134,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,boolean)}. This works with all
* byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -143,7 +143,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,FieldCache.ByteParser)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,FieldCache.ByteParser,boolean)}. This works with all
* byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -172,7 +172,7 @@ public abstract class FieldCacheRangeFil
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final byte[] values = FieldCache.DEFAULT.getBytes(context.reader, field, (FieldCache.ByteParser) parser);
+ final byte[] values = FieldCache.DEFAULT.getBytes(context.reader, field, (FieldCache.ByteParser) parser, false);
return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
@Override
boolean matchDoc(int doc) {
@@ -184,7 +184,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,boolean)}. This works with all
* short fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -193,7 +193,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,FieldCache.ShortParser)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,FieldCache.ShortParser,boolean)}. This works with all
* short fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -222,7 +222,7 @@ public abstract class FieldCacheRangeFil
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final short[] values = FieldCache.DEFAULT.getShorts(context.reader, field, (FieldCache.ShortParser) parser);
+ final short[] values = FieldCache.DEFAULT.getShorts(context.reader, field, (FieldCache.ShortParser) parser, false);
return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
@Override
boolean matchDoc(int doc) {
@@ -234,7 +234,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,boolean)}. This works with all
* int fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -243,7 +243,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,FieldCache.IntParser)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,FieldCache.IntParser,boolean)}. This works with all
* int fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -272,7 +272,7 @@ public abstract class FieldCacheRangeFil
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final int[] values = FieldCache.DEFAULT.getInts(context.reader, field, (FieldCache.IntParser) parser);
+ final int[] values = FieldCache.DEFAULT.getInts(context.reader, field, (FieldCache.IntParser) parser, false);
return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
@Override
boolean matchDoc(int doc) {
@@ -284,7 +284,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,boolean)}. This works with all
* long fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -293,7 +293,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,FieldCache.LongParser)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,FieldCache.LongParser,boolean)}. This works with all
* long fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -322,7 +322,7 @@ public abstract class FieldCacheRangeFil
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final long[] values = FieldCache.DEFAULT.getLongs(context.reader, field, (FieldCache.LongParser) parser);
+ final long[] values = FieldCache.DEFAULT.getLongs(context.reader, field, (FieldCache.LongParser) parser, false);
return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
@Override
boolean matchDoc(int doc) {
@@ -334,7 +334,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,boolean)}. This works with all
* float fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -343,7 +343,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,FieldCache.FloatParser)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,FieldCache.FloatParser,boolean)}. This works with all
* float fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -376,7 +376,7 @@ public abstract class FieldCacheRangeFil
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final float[] values = FieldCache.DEFAULT.getFloats(context.reader, field, (FieldCache.FloatParser) parser);
+ final float[] values = FieldCache.DEFAULT.getFloats(context.reader, field, (FieldCache.FloatParser) parser, false);
return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
@Override
boolean matchDoc(int doc) {
@@ -388,7 +388,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,boolean)}. This works with all
* double fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -397,7 +397,7 @@ public abstract class FieldCacheRangeFil
}
/**
- * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,FieldCache.DoubleParser)}. This works with all
+ * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,FieldCache.DoubleParser,boolean)}. This works with all
* double fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -430,7 +430,7 @@ public abstract class FieldCacheRangeFil
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;
- final double[] values = FieldCache.DEFAULT.getDoubles(context.reader, field, (FieldCache.DoubleParser) parser);
+ final double[] values = FieldCache.DEFAULT.getDoubles(context.reader, field, (FieldCache.DoubleParser) parser, false);
// ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
@Override
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/FieldComparator.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/FieldComparator.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/FieldComparator.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/FieldComparator.java Mon Nov 14 22:36:20 2011
@@ -17,19 +17,27 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.Comparator;
+
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.values.IndexDocValues;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.values.IndexDocValues.SortedSource;
import org.apache.lucene.index.values.IndexDocValues.Source;
+import org.apache.lucene.index.values.IndexDocValues;
+import org.apache.lucene.index.values.ValueType;
+import org.apache.lucene.search.FieldCache.ByteParser;
import org.apache.lucene.search.FieldCache.DocTerms;
import org.apache.lucene.search.FieldCache.DocTermsIndex;
-import org.apache.lucene.search.cache.*;
-import org.apache.lucene.search.cache.CachedArray.*;
+import org.apache.lucene.search.FieldCache.DoubleParser;
+import org.apache.lucene.search.FieldCache.FloatParser;
+import org.apache.lucene.search.FieldCache.IntParser;
+import org.apache.lucene.search.FieldCache.LongParser;
+import org.apache.lucene.search.FieldCache.ShortParser;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.packed.PackedInts;
-import java.io.IOException;
-
/**
* Expert: a FieldComparator compares hits so as to determine their
* sort order when collecting the top results with {@link
@@ -185,38 +193,43 @@ public abstract class FieldComparator<T>
}
}
- public static abstract class NumericComparator<T extends CachedArray, U extends Number> extends FieldComparator<U> {
- protected final CachedArrayCreator<T> creator;
- protected T cached;
- protected final boolean checkMissing;
- protected Bits valid;
+ public static abstract class NumericComparator<T extends Number> extends FieldComparator<T> {
+ protected final T missingValue;
+ protected final String field;
+ protected Bits docsWithField;
- public NumericComparator( CachedArrayCreator<T> c, boolean checkMissing ) {
- this.creator = c;
- this.checkMissing = checkMissing;
+ public NumericComparator(String field, T missingValue) {
+ this.field = field;
+ this.missingValue = missingValue;
}
- protected FieldComparator setup(T cached) {
- this.cached = cached;
- if (checkMissing)
- valid = cached.valid;
+ @Override
+ public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+ if (missingValue != null) {
+ docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader, field);
+ // optimization to remove unneeded checks on the bit interface:
+ if (docsWithField instanceof Bits.MatchAllBits) {
+ docsWithField = null;
+ }
+ } else {
+ docsWithField = null;
+ }
return this;
}
}
/** Parses field's values as byte (using {@link
* FieldCache#getBytes} and sorts by ascending value */
- public static final class ByteComparator extends NumericComparator<ByteValues,Byte> {
- private byte[] docValues;
+ public static final class ByteComparator extends NumericComparator<Byte> {
private final byte[] values;
- private final byte missingValue;
+ private final ByteParser parser;
+ private byte[] currentReaderValues;
private byte bottom;
- ByteComparator(int numHits, ByteValuesCreator creator, Byte missingValue ) {
- super( creator, missingValue!=null );
+ ByteComparator(int numHits, String field, FieldCache.Parser parser, Byte missingValue) {
+ super(field, missingValue);
values = new byte[numHits];
- this.missingValue = checkMissing
- ? missingValue.byteValue() : 0;
+ this.parser = (ByteParser) parser;
}
@Override
@@ -226,27 +239,33 @@ public abstract class FieldComparator<T>
@Override
public int compareBottom(int doc) {
- byte v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ byte v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
return bottom - v2;
}
@Override
public void copy(int slot, int doc) {
- byte v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ byte v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
-
+ }
values[slot] = v2;
}
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getBytes(context.reader, creator.field, creator));
- docValues = cached.values;
- return this;
+ // NOTE: must do this before calling super otherwise
+ // we compute the docsWithField Bits twice!
+ currentReaderValues = FieldCache.DEFAULT.getBytes(context.reader, field, parser, missingValue != null);
+ return super.setNextReader(context);
}
@Override
@@ -263,17 +282,16 @@ public abstract class FieldComparator<T>
/** Parses field's values as double (using {@link
* FieldCache#getDoubles} and sorts by ascending value */
- public static final class DoubleComparator extends NumericComparator<DoubleValues,Double> {
- private double[] docValues;
+ public static final class DoubleComparator extends NumericComparator<Double> {
private final double[] values;
- private final double missingValue;
+ private final DoubleParser parser;
+ private double[] currentReaderValues;
private double bottom;
- DoubleComparator(int numHits, DoubleValuesCreator creator, Double missingValue ) {
- super( creator, missingValue != null );
+ DoubleComparator(int numHits, String field, FieldCache.Parser parser, Double missingValue) {
+ super(field, missingValue);
values = new double[numHits];
- this.missingValue = checkMissing
- ? missingValue.doubleValue() : 0;
+ this.parser = (DoubleParser) parser;
}
@Override
@@ -291,9 +309,12 @@ public abstract class FieldComparator<T>
@Override
public int compareBottom(int doc) {
- double v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ double v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
if (bottom > v2) {
return 1;
@@ -306,18 +327,22 @@ public abstract class FieldComparator<T>
@Override
public void copy(int slot, int doc) {
- double v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ double v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
values[slot] = v2;
}
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getDoubles(context.reader, creator.field, creator));
- docValues = cached.values;
- return this;
+ // NOTE: must do this before calling super otherwise
+ // we compute the docsWithField Bits twice!
+ currentReaderValues = FieldCache.DEFAULT.getDoubles(context.reader, field, parser, missingValue != null);
+ return super.setNextReader(context);
}
@Override
@@ -334,8 +359,8 @@ public abstract class FieldComparator<T>
/** Uses float index values to sort by ascending value */
public static final class FloatDocValuesComparator extends FieldComparator<Double> {
private final double[] values;
- private Source currentReaderValues;
private final String field;
+ private Source currentReaderValues;
private double bottom;
FloatDocValuesComparator(int numHits, String field) {
@@ -378,6 +403,8 @@ public abstract class FieldComparator<T>
final IndexDocValues docValues = context.reader.docValues(field);
if (docValues != null) {
currentReaderValues = docValues.getSource();
+ } else {
+ currentReaderValues = IndexDocValues.getDefaultSource(ValueType.FLOAT_64);
}
return this;
}
@@ -395,17 +422,16 @@ public abstract class FieldComparator<T>
/** Parses field's values as float (using {@link
* FieldCache#getFloats} and sorts by ascending value */
- public static final class FloatComparator extends NumericComparator<FloatValues,Float> {
- private float[] docValues;
+ public static final class FloatComparator extends NumericComparator<Float> {
private final float[] values;
- private final float missingValue;
+ private final FloatParser parser;
+ private float[] currentReaderValues;
private float bottom;
- FloatComparator(int numHits, FloatValuesCreator creator, Float missingValue ) {
- super( creator, missingValue != null );
+ FloatComparator(int numHits, String field, FieldCache.Parser parser, Float missingValue) {
+ super(field, missingValue);
values = new float[numHits];
- this.missingValue = checkMissing
- ? missingValue.floatValue() : 0;
+ this.parser = (FloatParser) parser;
}
@Override
@@ -426,10 +452,12 @@ public abstract class FieldComparator<T>
@Override
public int compareBottom(int doc) {
// TODO: are there sneaky non-branch ways to compute sign of float?
- float v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ float v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
-
+ }
if (bottom > v2) {
return 1;
@@ -442,18 +470,22 @@ public abstract class FieldComparator<T>
@Override
public void copy(int slot, int doc) {
- float v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ float v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
values[slot] = v2;
}
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getFloats(context.reader, creator.field, creator));
- docValues = cached.values;
- return this;
+ // NOTE: must do this before calling super otherwise
+ // we compute the docsWithField Bits twice!
+ currentReaderValues = FieldCache.DEFAULT.getFloats(context.reader, field, parser, missingValue != null);
+ return super.setNextReader(context);
}
@Override
@@ -469,17 +501,16 @@ public abstract class FieldComparator<T>
/** Parses field's values as short (using {@link
* FieldCache#getShorts} and sorts by ascending value */
- public static final class ShortComparator extends NumericComparator<ShortValues,Short> {
- private short[] docValues;
+ public static final class ShortComparator extends NumericComparator<Short> {
private final short[] values;
+ private final ShortParser parser;
+ private short[] currentReaderValues;
private short bottom;
- private final short missingValue;
- ShortComparator(int numHits, ShortValuesCreator creator, Short missingValue ) {
- super( creator, missingValue != null );
+ ShortComparator(int numHits, String field, FieldCache.Parser parser, Short missingValue) {
+ super(field, missingValue);
values = new short[numHits];
- this.missingValue = checkMissing
- ? missingValue.shortValue() : 0;
+ this.parser = (ShortParser) parser;
}
@Override
@@ -489,27 +520,34 @@ public abstract class FieldComparator<T>
@Override
public int compareBottom(int doc) {
- short v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ short v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
return bottom - v2;
}
@Override
public void copy(int slot, int doc) {
- short v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ short v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
values[slot] = v2;
}
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup( FieldCache.DEFAULT.getShorts(context.reader, creator.field, creator));
- docValues = cached.values;
- return this;
+ // NOTE: must do this before calling super otherwise
+ // we compute the docsWithField Bits twice!
+ currentReaderValues = FieldCache.DEFAULT.getShorts(context.reader, field, parser, missingValue != null);
+ return super.setNextReader(context);
}
@Override
@@ -525,17 +563,16 @@ public abstract class FieldComparator<T>
/** Parses field's values as int (using {@link
* FieldCache#getInts} and sorts by ascending value */
- public static final class IntComparator extends NumericComparator<IntValues,Integer> {
- private int[] docValues;
+ public static final class IntComparator extends NumericComparator<Integer> {
private final int[] values;
+ private final IntParser parser;
+ private int[] currentReaderValues;
private int bottom; // Value of bottom of queue
- final int missingValue;
-
- IntComparator(int numHits, IntValuesCreator creator, Integer missingValue ) {
- super( creator, missingValue != null );
+
+ IntComparator(int numHits, String field, FieldCache.Parser parser, Integer missingValue) {
+ super(field, missingValue);
values = new int[numHits];
- this.missingValue = checkMissing
- ? missingValue.intValue() : 0;
+ this.parser = (IntParser) parser;
}
@Override
@@ -561,9 +598,12 @@ public abstract class FieldComparator<T>
// -1/+1/0 sign
// Cannot return bottom - values[slot2] because that
// may overflow
- int v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ int v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
if (bottom > v2) {
return 1;
@@ -576,18 +616,22 @@ public abstract class FieldComparator<T>
@Override
public void copy(int slot, int doc) {
- int v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ int v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
values[slot] = v2;
}
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getInts(context.reader, creator.field, creator));
- docValues = cached.values;
- return this;
+ // NOTE: must do this before calling super otherwise
+ // we compute the docsWithField Bits twice!
+ currentReaderValues = FieldCache.DEFAULT.getInts(context.reader, field, parser, missingValue != null);
+ return super.setNextReader(context);
}
@Override
@@ -652,6 +696,8 @@ public abstract class FieldComparator<T>
IndexDocValues docValues = context.reader.docValues(field);
if (docValues != null) {
currentReaderValues = docValues.getSource();
+ } else {
+ currentReaderValues = IndexDocValues.getDefaultSource(ValueType.FIXED_INTS_64);
}
return this;
}
@@ -669,19 +715,18 @@ public abstract class FieldComparator<T>
/** Parses field's values as long (using {@link
* FieldCache#getLongs} and sorts by ascending value */
- public static final class LongComparator extends NumericComparator<LongValues,Long> {
- private long[] docValues;
+ public static final class LongComparator extends NumericComparator<Long> {
private final long[] values;
+ private final LongParser parser;
+ private long[] currentReaderValues;
private long bottom;
- private final long missingValue;
- LongComparator(int numHits, LongValuesCreator creator, Long missingValue ) {
- super( creator, missingValue != null );
+ LongComparator(int numHits, String field, FieldCache.Parser parser, Long missingValue) {
+ super(field, missingValue);
values = new long[numHits];
- this.missingValue = checkMissing
- ? missingValue.longValue() : 0;
+ this.parser = (LongParser) parser;
}
-
+
@Override
public int compare(int slot1, int slot2) {
// TODO: there are sneaky non-branch ways to compute
@@ -701,11 +746,13 @@ public abstract class FieldComparator<T>
public int compareBottom(int doc) {
// TODO: there are sneaky non-branch ways to compute
// -1/+1/0 sign
- long v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ long v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
-
if (bottom > v2) {
return 1;
} else if (bottom < v2) {
@@ -717,18 +764,22 @@ public abstract class FieldComparator<T>
@Override
public void copy(int slot, int doc) {
- long v2 = docValues[doc];
- if (valid != null && v2==0 && !valid.get(doc))
+ long v2 = currentReaderValues[doc];
+ // Test for v2 == 0 to save Bits.get method call for
+ // the common case (doc has value and value is non-zero):
+ if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
v2 = missingValue;
+ }
values[slot] = v2;
}
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- setup(FieldCache.DEFAULT.getLongs(context.reader, creator.field, creator));
- docValues = cached.values;
- return this;
+ // NOTE: must do this before calling super otherwise
+ // we compute the docsWithField Bits twice!
+ currentReaderValues = FieldCache.DEFAULT.getLongs(context.reader, field, parser, missingValue != null);
+ return super.setNextReader(context);
}
@Override
@@ -868,30 +919,53 @@ public abstract class FieldComparator<T>
* than {@link TermValComparator}. For very small
* result sets it may be slower. */
public static final class TermOrdValComparator extends FieldComparator<BytesRef> {
- /** @lucene.internal */
+ /* Ords for each slot.
+ @lucene.internal */
final int[] ords;
- /** @lucene.internal */
+
+ /* Values for each slot.
+ @lucene.internal */
final BytesRef[] values;
- /** @lucene.internal */
+
+ /* Which reader last copied a value into the slot. When
+ we compare two slots, we just compare-by-ord if the
+ readerGen is the same; else we must compare the
+ values (slower).
+ @lucene.internal */
final int[] readerGen;
- /** @lucene.internal */
+ /* Gen of current reader we are on.
+ @lucene.internal */
int currentReaderGen = -1;
- private DocTermsIndex termsIndex;
+
+ /* Current reader's doc ord/values.
+ @lucene.internal */
+ DocTermsIndex termsIndex;
+
private final String field;
- /** @lucene.internal */
+ /* Bottom slot, or -1 if queue isn't full yet
+ @lucene.internal */
int bottomSlot = -1;
- /** @lucene.internal */
+
+ /* Bottom ord (same as ords[bottomSlot] once bottomSlot
+ is set). Cached for faster compares.
+ @lucene.internal */
int bottomOrd;
- /** @lucene.internal */
+
+ /* True if current bottom slot matches the current
+ reader.
+ @lucene.internal */
boolean bottomSameReader;
- /** @lucene.internal */
+
+ /* Bottom value (same as values[bottomSlot] once
+ bottomSlot is set). Cached for faster compares.
+ @lucene.internal */
BytesRef bottomValue;
- /** @lucene.internal */
+
final BytesRef tempBR = new BytesRef();
- public TermOrdValComparator(int numHits, String field, int sortPos, boolean reversed) {
+ public TermOrdValComparator(int numHits, String field) {
ords = new int[numHits];
values = new BytesRef[numHits];
readerGen = new int[numHits];
@@ -1282,6 +1356,396 @@ public abstract class FieldComparator<T>
}
}
+ /** Sorts by field's natural Term sort order, using
+ * ordinals; this is just like {@link
+ * TermOrdValComparator} except it uses DocValues to
+ * retrieve the sort ords saved during indexing. */
+ public static final class TermOrdValDocValuesComparator extends FieldComparator<BytesRef> {
+ /* Ords for each slot.
+ @lucene.internal */
+ final int[] ords;
+
+ /* Values for each slot.
+ @lucene.internal */
+ final BytesRef[] values;
+
+ /* Which reader last copied a value into the slot. When
+ we compare two slots, we just compare-by-ord if the
+ readerGen is the same; else we must compare the
+ values (slower).
+ @lucene.internal */
+ final int[] readerGen;
+
+ /* Gen of current reader we are on.
+ @lucene.internal */
+ int currentReaderGen = -1;
+
+ /* Current reader's doc ord/values.
+ @lucene.internal */
+ SortedSource termsIndex;
+
+ /* Comparator for comparing by value.
+ @lucene.internal */
+ Comparator<BytesRef> comp;
+
+ private final String field;
+
+ /* Bottom slot, or -1 if queue isn't full yet
+ @lucene.internal */
+ int bottomSlot = -1;
+
+ /* Bottom ord (same as ords[bottomSlot] once bottomSlot
+ is set). Cached for faster compares.
+ @lucene.internal */
+ int bottomOrd;
+
+ /* True if current bottom slot matches the current
+ reader.
+ @lucene.internal */
+ boolean bottomSameReader;
+
+ /* Bottom value (same as values[bottomSlot] once
+ bottomSlot is set). Cached for faster compares.
+ @lucene.internal */
+ BytesRef bottomValue;
+
+ /** @lucene.internal */
+ final BytesRef tempBR = new BytesRef();
+
+ public TermOrdValDocValuesComparator(int numHits, String field) {
+ ords = new int[numHits];
+ values = new BytesRef[numHits];
+ readerGen = new int[numHits];
+ this.field = field;
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ if (readerGen[slot1] == readerGen[slot2]) {
+ return ords[slot1] - ords[slot2];
+ }
+
+ final BytesRef val1 = values[slot1];
+ final BytesRef val2 = values[slot2];
+ if (val1 == null) {
+ if (val2 == null) {
+ return 0;
+ }
+ return -1;
+ } else if (val2 == null) {
+ return 1;
+ }
+ return comp.compare(val1, val2);
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ throw new UnsupportedOperationException();
+ }
+
+ // TODO: would be nice to share these specialized impls
+ // w/ TermOrdValComparator
+
+ /** Base class for specialized (per bit width of the
+ * ords) per-segment comparator. NOTE: this is messy;
+ * we do this only because hotspot can't reliably inline
+ * the underlying array access when looking up doc->ord
+ * @lucene.internal
+ */
+ abstract class PerSegmentComparator extends FieldComparator<BytesRef> {
+
+ @Override
+ public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+ return TermOrdValDocValuesComparator.this.setNextReader(context);
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ return TermOrdValDocValuesComparator.this.compare(slot1, slot2);
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ TermOrdValDocValuesComparator.this.setBottom(bottom);
+ }
+
+ @Override
+ public BytesRef value(int slot) {
+ return TermOrdValDocValuesComparator.this.value(slot);
+ }
+
+ @Override
+ public int compareValues(BytesRef val1, BytesRef val2) {
+ assert val1 != null;
+ assert val2 != null;
+ return comp.compare(val1, val2);
+ }
+ }
+
+ // Used per-segment when bit width of doc->ord is 8:
+ private final class ByteOrdComparator extends PerSegmentComparator {
+ private final byte[] readerOrds;
+ private final SortedSource termsIndex;
+ private final int docBase;
+
+ public ByteOrdComparator(byte[] readerOrds, SortedSource termsIndex, int docBase) {
+ this.readerOrds = readerOrds;
+ this.termsIndex = termsIndex;
+ this.docBase = docBase;
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ assert bottomSlot != -1;
+ if (bottomSameReader) {
+ // ord is precisely comparable, even in the equal case
+ return bottomOrd - (readerOrds[doc]&0xFF);
+ } else {
+ // ord is only approx comparable: if they are not
+ // equal, we can use that; if they are equal, we
+ // must fallback to compare by value
+ final int order = readerOrds[doc]&0xFF;
+ final int cmp = bottomOrd - order;
+ if (cmp != 0) {
+ return cmp;
+ }
+
+ termsIndex.getByOrd(order, tempBR);
+ return comp.compare(bottomValue, tempBR);
+ }
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ final int ord = readerOrds[doc]&0xFF;
+ ords[slot] = ord;
+ if (values[slot] == null) {
+ values[slot] = new BytesRef();
+ }
+ termsIndex.getByOrd(ord, values[slot]);
+ readerGen[slot] = currentReaderGen;
+ }
+ }
+
+ // Used per-segment when bit width of doc->ord is 16:
+ private final class ShortOrdComparator extends PerSegmentComparator {
+ private final short[] readerOrds;
+ private final SortedSource termsIndex;
+ private final int docBase;
+
+ public ShortOrdComparator(short[] readerOrds, SortedSource termsIndex, int docBase) {
+ this.readerOrds = readerOrds;
+ this.termsIndex = termsIndex;
+ this.docBase = docBase;
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ assert bottomSlot != -1;
+ if (bottomSameReader) {
+ // ord is precisely comparable, even in the equal case
+ return bottomOrd - (readerOrds[doc]&0xFFFF);
+ } else {
+ // ord is only approx comparable: if they are not
+ // equal, we can use that; if they are equal, we
+ // must fallback to compare by value
+ final int order = readerOrds[doc]&0xFFFF;
+ final int cmp = bottomOrd - order;
+ if (cmp != 0) {
+ return cmp;
+ }
+
+ termsIndex.getByOrd(order, tempBR);
+ return comp.compare(bottomValue, tempBR);
+ }
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ final int ord = readerOrds[doc]&0xFFFF;
+ ords[slot] = ord;
+ if (values[slot] == null) {
+ values[slot] = new BytesRef();
+ }
+ termsIndex.getByOrd(ord, values[slot]);
+ readerGen[slot] = currentReaderGen;
+ }
+ }
+
+ // Used per-segment when bit width of doc->ord is 32:
+ private final class IntOrdComparator extends PerSegmentComparator {
+ private final int[] readerOrds;
+ private final SortedSource termsIndex;
+ private final int docBase;
+
+ public IntOrdComparator(int[] readerOrds, SortedSource termsIndex, int docBase) {
+ this.readerOrds = readerOrds;
+ this.termsIndex = termsIndex;
+ this.docBase = docBase;
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ assert bottomSlot != -1;
+ if (bottomSameReader) {
+ // ord is precisely comparable, even in the equal case
+ return bottomOrd - readerOrds[doc];
+ } else {
+ // ord is only approx comparable: if they are not
+ // equal, we can use that; if they are equal, we
+ // must fallback to compare by value
+ final int order = readerOrds[doc];
+ final int cmp = bottomOrd - order;
+ if (cmp != 0) {
+ return cmp;
+ }
+ termsIndex.getByOrd(order, tempBR);
+ return comp.compare(bottomValue, tempBR);
+ }
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ final int ord = readerOrds[doc];
+ ords[slot] = ord;
+ if (values[slot] == null) {
+ values[slot] = new BytesRef();
+ }
+ termsIndex.getByOrd(ord, values[slot]);
+ readerGen[slot] = currentReaderGen;
+ }
+ }
+
+ // Used per-segment when bit width is not a native array
+ // size (8, 16, 32):
+ private final class AnyOrdComparator extends PerSegmentComparator {
+ private final PackedInts.Reader readerOrds;
+ private final int docBase;
+
+ public AnyOrdComparator(PackedInts.Reader readerOrds, int docBase) {
+ this.readerOrds = readerOrds;
+ this.docBase = docBase;
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ assert bottomSlot != -1;
+ if (bottomSameReader) {
+ // ord is precisely comparable, even in the equal case
+ return bottomOrd - (int) readerOrds.get(doc);
+ } else {
+ // ord is only approx comparable: if they are not
+ // equal, we can use that; if they are equal, we
+ // must fallback to compare by value
+ final int order = (int) readerOrds.get(doc);
+ final int cmp = bottomOrd - order;
+ if (cmp != 0) {
+ return cmp;
+ }
+ termsIndex.getByOrd(order, tempBR);
+ return comp.compare(bottomValue, tempBR);
+ }
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ final int ord = (int) readerOrds.get(doc);
+ ords[slot] = ord;
+ if (values[slot] == null) {
+ values[slot] = new BytesRef();
+ }
+ termsIndex.getByOrd(ord, values[slot]);
+ readerGen[slot] = currentReaderGen;
+ }
+ }
+
+ @Override
+ public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+ final int docBase = context.docBase;
+
+ final IndexDocValues dv = context.reader.docValues(field);
+ if (dv == null) {
+ termsIndex = IndexDocValues.getDefaultSortedSource(ValueType.BYTES_VAR_SORTED, context.reader.maxDoc());
+ } else {
+ termsIndex = dv.getSource().asSortedSource();
+ if (termsIndex == null) {
+ termsIndex = IndexDocValues.getDefaultSortedSource(ValueType.BYTES_VAR_SORTED, context.reader.maxDoc());
+ }
+ }
+
+ comp = termsIndex.getComparator();
+
+ FieldComparator perSegComp = null;
+ final PackedInts.Reader docToOrd = termsIndex.getDocToOrd();
+ if (docToOrd.hasArray()) {
+ final Object arr = docToOrd.getArray();
+ assert arr != null;
+ if (arr instanceof byte[]) {
+ // 8 bit packed
+ perSegComp = new ByteOrdComparator((byte[]) arr, termsIndex, docBase);
+ } else if (arr instanceof short[]) {
+ // 16 bit packed
+ perSegComp = new ShortOrdComparator((short[]) arr, termsIndex, docBase);
+ } else if (arr instanceof int[]) {
+ // 32 bit packed
+ perSegComp = new IntOrdComparator((int[]) arr, termsIndex, docBase);
+ }
+ }
+
+ if (perSegComp == null) {
+ perSegComp = new AnyOrdComparator(docToOrd, docBase);
+ }
+
+ currentReaderGen++;
+ if (bottomSlot != -1) {
+ perSegComp.setBottom(bottomSlot);
+ }
+
+ return perSegComp;
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ bottomSlot = bottom;
+
+ bottomValue = values[bottomSlot];
+ if (currentReaderGen == readerGen[bottomSlot]) {
+ bottomOrd = ords[bottomSlot];
+ bottomSameReader = true;
+ } else {
+ if (bottomValue == null) {
+ // 0 ord is null for all segments
+ assert ords[bottomSlot] == 0;
+ bottomOrd = 0;
+ bottomSameReader = true;
+ readerGen[bottomSlot] = currentReaderGen;
+ } else {
+ final int index = termsIndex.getByValue(bottomValue, tempBR);
+ if (index < 0) {
+ bottomOrd = -index - 2;
+ bottomSameReader = false;
+ } else {
+ bottomOrd = index;
+ // exact value match
+ bottomSameReader = true;
+ readerGen[bottomSlot] = currentReaderGen;
+ ords[bottomSlot] = bottomOrd;
+ }
+ }
+ }
+ }
+
+ @Override
+ public BytesRef value(int slot) {
+ return values[slot];
+ }
+ }
+
/** Sorts by field's natural Term sort order. All
* comparisons are done using BytesRef.compareTo, which is
* slow for medium to large result sets but possibly
@@ -1367,6 +1831,74 @@ public abstract class FieldComparator<T>
}
}
+ /** Sorts by field's natural Term sort order. All
+ * comparisons are done using BytesRef.compareTo, which is
+ * slow for medium to large result sets but possibly
+ * very fast for very small result sets. The BytesRef
+ * values are obtained using {@link IndexReader#docValues}. */
+ public static final class TermValDocValuesComparator extends FieldComparator<BytesRef> {
+
+ private BytesRef[] values;
+ private Source docTerms;
+ private final String field;
+ private BytesRef bottom;
+ private final BytesRef tempBR = new BytesRef();
+
+ TermValDocValuesComparator(int numHits, String field) {
+ values = new BytesRef[numHits];
+ this.field = field;
+ }
+
+ @Override
+ public int compare(int slot1, int slot2) {
+ assert values[slot1] != null;
+ assert values[slot2] != null;
+ return values[slot1].compareTo(values[slot2]);
+ }
+
+ @Override
+ public int compareBottom(int doc) {
+ assert bottom != null;
+ return bottom.compareTo(docTerms.getBytes(doc, tempBR));
+ }
+
+ @Override
+ public void copy(int slot, int doc) {
+ if (values[slot] == null) {
+ values[slot] = new BytesRef();
+ }
+ docTerms.getBytes(doc, values[slot]);
+ }
+
+ @Override
+ public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+ final IndexDocValues dv = context.reader.docValues(field);
+ if (dv != null) {
+ docTerms = dv.getSource();
+ } else {
+ docTerms = IndexDocValues.getDefaultSource(ValueType.BYTES_VAR_DEREF);
+ }
+ return this;
+ }
+
+ @Override
+ public void setBottom(final int bottom) {
+ this.bottom = values[bottom];
+ }
+
+ @Override
+ public BytesRef value(int slot) {
+ return values[slot];
+ }
+
+ @Override
+ public int compareValues(BytesRef val1, BytesRef val2) {
+ assert val1 != null;
+ assert val2 != null;
+ return val1.compareTo(val2);
+ }
+ }
+
final protected static int binarySearch(BytesRef br, DocTermsIndex a, BytesRef key) {
return binarySearch(br, a, key, 1, a.numOrd()-1);
}
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java Mon Nov 14 22:36:20 2011
@@ -64,7 +64,6 @@ import org.apache.lucene.util.TermContex
public abstract class MultiTermQuery extends Query {
protected final String field;
protected RewriteMethod rewriteMethod = CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
- transient int numberOfTerms = 0;
/** Abstract class that defines how the query is rewritten. */
public static abstract class RewriteMethod {
@@ -271,38 +270,6 @@ public abstract class MultiTermQuery ext
}
/**
- * Expert: Return the number of unique terms visited during execution of the query.
- * If there are many of them, you may consider using another query type
- * or optimize your total term count in index.
- * <p>This method is not thread safe, be sure to only call it when no query is running!
- * If you re-use the same query instance for another
- * search, be sure to first reset the term counter
- * with {@link #clearTotalNumberOfTerms}.
- * <p>On optimized indexes / no MultiReaders, you get the correct number of
- * unique terms for the whole index. Use this number to compare different queries.
- * For non-optimized indexes this number can also be achieved in
- * non-constant-score mode. In constant-score mode you get the total number of
- * terms seeked for all segments / sub-readers.
- * @see #clearTotalNumberOfTerms
- */
- public int getTotalNumberOfTerms() {
- return numberOfTerms;
- }
-
- /**
- * Expert: Resets the counting of unique terms.
- * Do this before executing the query/filter.
- * @see #getTotalNumberOfTerms
- */
- public void clearTotalNumberOfTerms() {
- numberOfTerms = 0;
- }
-
- protected void incTotalNumberOfTerms(int inc) {
- numberOfTerms += inc;
- }
-
- /**
* To rewrite to a simpler form, instead return a simpler
* enum from {@link #getTermsEnum(Terms, AttributeSource)}. For example,
* to rewrite to a single term, return a {@link SingleTermsEnum}
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java Mon Nov 14 22:36:20 2011
@@ -78,29 +78,6 @@ public class MultiTermQueryWrapperFilter
public final String getField() { return query.getField(); }
/**
- * Expert: Return the number of unique terms visited during execution of the filter.
- * If there are many of them, you may consider using another filter type
- * or optimize your total term count in index.
- * <p>This method is not thread safe, be sure to only call it when no filter is running!
- * If you re-use the same filter instance for another
- * search, be sure to first reset the term counter
- * with {@link #clearTotalNumberOfTerms}.
- * @see #clearTotalNumberOfTerms
- */
- public int getTotalNumberOfTerms() {
- return query.getTotalNumberOfTerms();
- }
-
- /**
- * Expert: Resets the counting of unique terms.
- * Do this before executing the filter.
- * @see #getTotalNumberOfTerms
- */
- public void clearTotalNumberOfTerms() {
- query.clearTotalNumberOfTerms();
- }
-
- /**
* Returns a DocIdSet with documents that should be permitted in search
* results.
*/
@@ -146,7 +123,6 @@ public class MultiTermQueryWrapperFilter
} while (termsEnum.next() != null);
// System.out.println(" done termCount=" + termCount);
- query.incTotalNumberOfTerms(termCount);
return bitSet;
} else {
return DocIdSet.EMPTY_DOCIDSET;
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/ScoringRewrite.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/ScoringRewrite.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/ScoringRewrite.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/ScoringRewrite.java Mon Nov 14 22:36:20 2011
@@ -125,7 +125,6 @@ public abstract class ScoringRewrite<Q e
addClause(result, term, termStates[pos].docFreq(), query.getBoost() * boost[pos], termStates[pos]);
}
}
- query.incTotalNumberOfTerms(size);
return result;
}
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/SortField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/SortField.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/SortField.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/SortField.java Mon Nov 14 22:36:20 2011
@@ -20,7 +20,6 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Comparator;
-import org.apache.lucene.search.cache.*;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
@@ -104,12 +103,14 @@ public class SortField {
private String field;
private Type type; // defaults to determining type dynamically
boolean reverse = false; // defaults to natural order
- private CachedArrayCreator<?> creator;
- public Object missingValue = null; // used for 'sortMissingFirst/Last'
+ private FieldCache.Parser parser;
// Used for CUSTOM sort
private FieldComparatorSource comparatorSource;
+ // Used for 'sortMissingFirst/Last'
+ public Object missingValue = null;
+
/** Creates a sort by terms in the given field with the type of term
* values explicitly given.
* @param field Name of field to sort by. Can be <code>null</code> if
@@ -141,10 +142,7 @@ public class SortField {
* by testing which numeric parser the parser subclasses.
* @throws IllegalArgumentException if the parser fails to
* subclass an existing numeric parser, or field is null
- *
- * @deprecated (4.0) use EntryCreator version
*/
- @Deprecated
public SortField(String field, FieldCache.Parser parser) {
this(field, parser, false);
}
@@ -159,65 +157,27 @@ public class SortField {
* @param reverse True if natural order should be reversed.
* @throws IllegalArgumentException if the parser fails to
* subclass an existing numeric parser, or field is null
- *
- * @deprecated (4.0) use EntryCreator version
*/
- @Deprecated
public SortField(String field, FieldCache.Parser parser, boolean reverse) {
- if (field == null) {
- throw new IllegalArgumentException("field can only be null when type is SCORE or DOC");
- }
- this.field = field;
- this.reverse = reverse;
-
- if (parser instanceof FieldCache.IntParser) {
- this.creator = new IntValuesCreator( field, (FieldCache.IntParser)parser );
- }
- else if (parser instanceof FieldCache.FloatParser) {
- this.creator = new FloatValuesCreator( field, (FieldCache.FloatParser)parser );
- }
- else if (parser instanceof FieldCache.ShortParser) {
- this.creator = new ShortValuesCreator( field, (FieldCache.ShortParser)parser );
- }
- else if (parser instanceof FieldCache.ByteParser) {
- this.creator = new ByteValuesCreator( field, (FieldCache.ByteParser)parser );
- }
- else if (parser instanceof FieldCache.LongParser) {
- this.creator = new LongValuesCreator( field, (FieldCache.LongParser)parser );
- }
- else if (parser instanceof FieldCache.DoubleParser) {
- this.creator = new DoubleValuesCreator( field, (FieldCache.DoubleParser)parser );
- }
- else
+ if (parser instanceof FieldCache.IntParser) initFieldType(field, Type.INT);
+ else if (parser instanceof FieldCache.FloatParser) initFieldType(field, Type.FLOAT);
+ else if (parser instanceof FieldCache.ShortParser) initFieldType(field, Type.SHORT);
+ else if (parser instanceof FieldCache.ByteParser) initFieldType(field, Type.BYTE);
+ else if (parser instanceof FieldCache.LongParser) initFieldType(field, Type.LONG);
+ else if (parser instanceof FieldCache.DoubleParser) initFieldType(field, Type.DOUBLE);
+ else {
throw new IllegalArgumentException("Parser instance does not subclass existing numeric parser from FieldCache (got " + parser + ")");
+ }
- this.type = this.creator.getSortType();
- }
-
- /**
- * Sort by a cached entry value
- * @param creator
- * @param reverse
- */
- public SortField( CachedArrayCreator<?> creator, boolean reverse )
- {
- this.field = creator.field;
this.reverse = reverse;
- this.creator = creator;
- this.type = creator.getSortType();
+ this.parser = parser;
}
- public SortField setMissingValue( Object v )
- {
- missingValue = v;
- if( missingValue != null ) {
- if( this.creator == null ) {
- throw new IllegalArgumentException( "Missing value only works for sort fields with a CachedArray" );
- }
-
- // Set the flag to get bits
- creator.setFlag( CachedArrayCreator.OPTION_CACHE_BITS );
+ public SortField setMissingValue(Object missingValue) {
+ if (type != Type.BYTE && type != Type.SHORT && type != Type.INT && type != Type.FLOAT && type != Type.LONG && type != Type.DOUBLE) {
+ throw new IllegalArgumentException( "Missing value only works for numeric types" );
}
+ this.missingValue = missingValue;
return this;
}
@@ -246,23 +206,12 @@ public class SortField {
private void initFieldType(String field, Type type) {
this.type = type;
if (field == null) {
- if (type != Type.SCORE && type != Type.DOC)
+ if (type != Type.SCORE && type != Type.DOC) {
throw new IllegalArgumentException("field can only be null when type is SCORE or DOC");
+ }
} else {
this.field = field;
}
-
- if( creator != null ) {
- throw new IllegalStateException( "creator already exists: "+creator );
- }
- switch( type ) {
- case BYTE: creator = new ByteValuesCreator( field, null ); break;
- case SHORT: creator = new ShortValuesCreator( field, null ); break;
- case INT: creator = new IntValuesCreator( field, null ); break;
- case LONG: creator = new LongValuesCreator( field, null ); break;
- case FLOAT: creator = new FloatValuesCreator( field, null ); break;
- case DOUBLE: creator = new DoubleValuesCreator( field, null ); break;
- }
}
/** Returns the name of the field. Could return <code>null</code>
@@ -283,15 +232,9 @@ public class SortField {
/** Returns the instance of a {@link FieldCache} parser that fits to the given sort type.
* May return <code>null</code> if no parser was specified. Sorting is using the default parser then.
* @return An instance of a {@link FieldCache} parser, or <code>null</code>.
- * @deprecated (4.0) use getEntryCreator()
*/
- @Deprecated
public FieldCache.Parser getParser() {
- return (creator==null) ? null : creator.getParser();
- }
-
- public CachedArrayCreator<?> getEntryCreator() {
- return creator;
+ return parser;
}
/** Returns whether the sort should be reversed.
@@ -311,6 +254,7 @@ public class SortField {
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
+ String dv = useIndexValues ? " [dv]" : "";
switch (type) {
case SCORE:
buffer.append("<score>");
@@ -321,11 +265,11 @@ public class SortField {
break;
case STRING:
- buffer.append("<string: \"").append(field).append("\">");
+ buffer.append("<string" + dv + ": \"").append(field).append("\">");
break;
case STRING_VAL:
- buffer.append("<string_val: \"").append(field).append("\">");
+ buffer.append("<string_val" + dv + ": \"").append(field).append("\">");
break;
case BYTE:
@@ -337,7 +281,7 @@ public class SortField {
break;
case INT:
- buffer.append("<int: \"").append(field).append("\">");
+ buffer.append("<int" + dv + ": \"").append(field).append("\">");
break;
case LONG:
@@ -345,11 +289,11 @@ public class SortField {
break;
case FLOAT:
- buffer.append("<float: \"").append(field).append("\">");
+ buffer.append("<float" + dv + ": \"").append(field).append("\">");
break;
case DOUBLE:
- buffer.append("<double: \"").append(field).append("\">");
+ buffer.append("<double" + dv + ": \"").append(field).append("\">");
break;
case CUSTOM:
@@ -365,7 +309,6 @@ public class SortField {
break;
}
- if (creator != null) buffer.append('(').append(creator).append(')');
if (reverse) buffer.append('!');
return buffer.toString();
@@ -385,7 +328,6 @@ public class SortField {
&& other.type == this.type
&& other.reverse == this.reverse
&& (other.comparatorSource == null ? this.comparatorSource == null : other.comparatorSource.equals(this.comparatorSource))
- && (other.creator == null ? this.creator == null : other.creator.equals(this.creator))
);
}
@@ -399,7 +341,6 @@ public class SortField {
int hash = type.hashCode() ^ 0x346565dd + Boolean.valueOf(reverse).hashCode() ^ 0xaf5998bb;
if (field != null) hash += field.hashCode()^0xff5685dd;
if (comparatorSource != null) hash += comparatorSource.hashCode();
- if (creator != null) hash += creator.hashCode()^0x3aaf56ff;
return hash;
}
@@ -448,37 +389,45 @@ public class SortField {
if (useIndexValues) {
return new FieldComparator.IntDocValuesComparator(numHits, field);
} else {
- return new FieldComparator.IntComparator(numHits, (IntValuesCreator)creator, (Integer) missingValue);
+ return new FieldComparator.IntComparator(numHits, field, parser, (Integer) missingValue);
}
case FLOAT:
if (useIndexValues) {
return new FieldComparator.FloatDocValuesComparator(numHits, field);
} else {
- return new FieldComparator.FloatComparator(numHits, (FloatValuesCreator) creator, (Float) missingValue);
+ return new FieldComparator.FloatComparator(numHits, field, parser, (Float) missingValue);
}
case LONG:
- return new FieldComparator.LongComparator(numHits, (LongValuesCreator)creator, (Long)missingValue );
+ return new FieldComparator.LongComparator(numHits, field, parser, (Long) missingValue);
case DOUBLE:
- return new FieldComparator.DoubleComparator(numHits, (DoubleValuesCreator)creator, (Double)missingValue );
+ return new FieldComparator.DoubleComparator(numHits, field, parser, (Double) missingValue);
case BYTE:
- return new FieldComparator.ByteComparator(numHits, (ByteValuesCreator)creator, (Byte)missingValue );
+ return new FieldComparator.ByteComparator(numHits, field, parser, (Byte) missingValue);
case SHORT:
- return new FieldComparator.ShortComparator(numHits, (ShortValuesCreator)creator, (Short)missingValue );
+ return new FieldComparator.ShortComparator(numHits, field, parser, (Short) missingValue);
case CUSTOM:
assert comparatorSource != null;
return comparatorSource.newComparator(field, numHits, sortPos, reverse);
case STRING:
- return new FieldComparator.TermOrdValComparator(numHits, field, sortPos, reverse);
+ if (useIndexValues) {
+ return new FieldComparator.TermOrdValDocValuesComparator(numHits, field);
+ } else {
+ return new FieldComparator.TermOrdValComparator(numHits, field);
+ }
case STRING_VAL:
- return new FieldComparator.TermValComparator(numHits, field);
+ if (useIndexValues) {
+ return new FieldComparator.TermValDocValuesComparator(numHits, field);
+ } else {
+ return new FieldComparator.TermValComparator(numHits, field);
+ }
case REWRITEABLE:
throw new IllegalStateException("SortField needs to be rewritten through Sort.rewrite(..) and SortField.rewrite(..)");
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/TimeLimitingCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/TimeLimitingCollector.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/TimeLimitingCollector.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/TimeLimitingCollector.java Mon Nov 14 22:36:20 2011
@@ -20,6 +20,7 @@ package org.apache.lucene.search;
import java.io.IOException;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.util.Counter;
import org.apache.lucene.util.ThreadInterruptedException;
/**
@@ -30,69 +31,9 @@ import org.apache.lucene.util.ThreadInte
*/
public class TimeLimitingCollector extends Collector {
- /**
- * Default timer resolution.
- * @see #setResolution(long)
- */
- public static final int DEFAULT_RESOLUTION = 20;
-
- /**
- * Default for {@link #isGreedy()}.
- * @see #isGreedy()
- */
- public boolean DEFAULT_GREEDY = false;
-
- private static long resolution = DEFAULT_RESOLUTION;
-
- private boolean greedy = DEFAULT_GREEDY ;
-
- private static final class TimerThread extends Thread {
-
- // NOTE: we can avoid explicit synchronization here for several reasons:
- // * updates to volatile long variables are atomic
- // * only single thread modifies this value
- // * use of volatile keyword ensures that it does not reside in
- // a register, but in main memory (so that changes are visible to
- // other threads).
- // * visibility of changes does not need to be instantaneous, we can
- // afford losing a tick or two.
- //
- // See section 17 of the Java Language Specification for details.
- private volatile long time = 0;
-
- /**
- * TimerThread provides a pseudo-clock service to all searching
- * threads, so that they can count elapsed time with less overhead
- * than repeatedly calling System.currentTimeMillis. A single
- * thread should be created to be used for all searches.
- */
- private TimerThread() {
- super("TimeLimitedCollector timer thread");
- this.setDaemon( true );
- }
-
- @Override
- public void run() {
- while (true) {
- // TODO: Use System.nanoTime() when Lucene moves to Java SE 5.
- time += resolution;
- try {
- Thread.sleep( resolution );
- } catch (InterruptedException ie) {
- throw new ThreadInterruptedException(ie);
- }
- }
- }
-
- /**
- * Get the timer value in milliseconds.
- */
- public long getMilliseconds() {
- return time;
- }
- }
/** Thrown when elapsed search time exceeds allowed search time. */
+ @SuppressWarnings("serial")
public static class TimeExceededException extends RuntimeException {
private long timeAllowed;
private long timeElapsed;
@@ -117,58 +58,59 @@ public class TimeLimitingCollector exten
}
}
- // Declare and initialize a single static timer thread to be used by
- // all TimeLimitedCollector instances. The JVM assures that
- // this only happens once.
- private final static TimerThread TIMER_THREAD = new TimerThread();
-
- static {
- TIMER_THREAD.start();
- }
-
- private final long t0;
- private final long timeout;
+ private long t0 = Long.MIN_VALUE;
+ private long timeout = Long.MIN_VALUE;
private final Collector collector;
-
+ private final Counter clock;
+ private final long ticksAllowed;
+ private boolean greedy = false;
private int docBase;
/**
* Create a TimeLimitedCollector wrapper over another {@link Collector} with a specified timeout.
* @param collector the wrapped {@link Collector}
- * @param timeAllowed max time allowed for collecting hits after which {@link TimeExceededException} is thrown
+ * @param clock the timer clock
+ * @param ticksAllowed max time allowed for collecting
+ * hits after which {@link TimeExceededException} is thrown
*/
- public TimeLimitingCollector(final Collector collector, final long timeAllowed ) {
+ public TimeLimitingCollector(final Collector collector, Counter clock, final long ticksAllowed ) {
this.collector = collector;
- t0 = TIMER_THREAD.getMilliseconds();
- this.timeout = t0 + timeAllowed;
+ this.clock = clock;
+ this.ticksAllowed = ticksAllowed;
}
-
- /**
- * Return the timer resolution.
- * @see #setResolution(long)
- */
- public static long getResolution() {
- return resolution;
+
+ /**
+ * Sets the baseline for this collector. By default the collector's baseline is
+ * initialized once the first reader is passed to the collector.
+ * To include operations executed prior to the actual document collection,
+ * set the baseline through this method in your prelude.
+ * <p>
+ * Example usage:
+ * <pre>
+ * Counter clock = ...;
+ * long baseline = clock.get();
+ * // ... prepare search
+ * TimeLimitingCollector collector = new TimeLimitingCollector(c, clock, numTicks);
+ * collector.setBaseline(baseline);
+ * indexSearcher.search(query, collector);
+ * </pre>
+ * </p>
+ * @see #setBaseline()
+ * @param clockTime the clock time in ticks to use as the timing baseline
+ */
+ public void setBaseline(long clockTime) {
+ t0 = clockTime;
+ timeout = t0 + ticksAllowed;
}
-
+
/**
- * Set the timer resolution.
- * The default timer resolution is 20 milliseconds.
- * This means that a search required to take no longer than
- * 800 milliseconds may be stopped after 780 to 820 milliseconds.
- * <br>Note that:
- * <ul>
- * <li>Finer (smaller) resolution is more accurate but less efficient.</li>
- * <li>Setting resolution to less than 5 milliseconds will be silently modified to 5 milliseconds.</li>
- * <li>Setting resolution smaller than current resolution might take effect only after current
- * resolution. (Assume current resolution of 20 milliseconds is modified to 5 milliseconds,
- * then it can take up to 20 milliseconds for the change to have effect.</li>
- * </ul>
+ * Syntactic sugar for {@link #setBaseline(long)} using {@link Counter#get()}
+ * on the clock passed to the constructor.
*/
- public static void setResolution(long newResolution) {
- resolution = Math.max(newResolution,5); // 5 milliseconds is about the minimum reasonable time for a Object.wait(long) call.
+ public void setBaseline() {
+ setBaseline(clock.get());
}
-
+
/**
* Checks if this time limited collector is greedy in collecting the last hit.
* A non greedy collector, upon a timeout, would throw a {@link TimeExceededException}
@@ -199,7 +141,7 @@ public class TimeLimitingCollector exten
*/
@Override
public void collect(final int doc) throws IOException {
- long time = TIMER_THREAD.getMilliseconds();
+ final long time = clock.get();
if (timeout < time) {
if (greedy) {
//System.out.println(this+" greedy: before failing, collecting doc: "+(docBase + doc)+" "+(time-t0));
@@ -216,6 +158,9 @@ public class TimeLimitingCollector exten
public void setNextReader(AtomicReaderContext context) throws IOException {
collector.setNextReader(context);
this.docBase = context.docBase;
+ if (Long.MIN_VALUE == t0) {
+ setBaseline();
+ }
}
@Override
@@ -228,4 +173,131 @@ public class TimeLimitingCollector exten
return collector.acceptsDocsOutOfOrder();
}
+
+ /**
+ * Returns the global {@link TimerThread}'s {@link Counter}.
+ * <p>
+ * Invoking this method may create a new instance of {@link TimerThread} iff
+ * the global {@link TimerThread} has never been accessed before. The thread
+ * returned from this method is started on creation and will be alive unless
+ * you stop the {@link TimerThread} via {@link TimerThread#stopTimer()}.
+ * </p>
+ * @return the global {@link TimerThread}'s {@link Counter}
+ * @lucene.experimental
+ */
+ public static Counter getGlobalCounter() {
+ return TimerThreadHolder.THREAD.counter;
+ }
+
+ /**
+ * Returns the global {@link TimerThread}.
+ * <p>
+ * Invoking this method may create a new instance of {@link TimerThread} iff
+ * the global {@link TimerThread} has never been accessed before. The thread
+ * returned from this method is started on creation and will be alive unless
+ * you stop the {@link TimerThread} via {@link TimerThread#stopTimer()}.
+ * </p>
+ *
+ * @return the global {@link TimerThread}
+ * @lucene.experimental
+ */
+ public static TimerThread getGlobalTimerThread() {
+ return TimerThreadHolder.THREAD;
+ }
+
+ private static final class TimerThreadHolder {
+ static final TimerThread THREAD;
+ static {
+ THREAD = new TimerThread(Counter.newCounter(true));
+ THREAD.start();
+ }
+ }
+
+ /**
+ * @lucene.experimental
+ */
+ public static final class TimerThread extends Thread {
+
+ public static final String THREAD_NAME = "TimeLimitedCollector timer thread";
+ public static final int DEFAULT_RESOLUTION = 20;
+ // NOTE: we can avoid explicit synchronization here for several reasons:
+ // * updates to volatile long variables are atomic
+ // * only single thread modifies this value
+ // * use of volatile keyword ensures that it does not reside in
+ // a register, but in main memory (so that changes are visible to
+ // other threads).
+ // * visibility of changes does not need to be instantaneous, we can
+ // afford losing a tick or two.
+ //
+ // See section 17 of the Java Language Specification for details.
+ private volatile long time = 0;
+ private volatile boolean stop = false;
+ private volatile long resolution;
+ final Counter counter;
+
+ public TimerThread(long resolution, Counter counter) {
+ super(THREAD_NAME);
+ this.resolution = resolution;
+ this.counter = counter;
+ this.setDaemon(true);
+ }
+
+ public TimerThread(Counter counter) {
+ this(DEFAULT_RESOLUTION, counter);
+ }
+
+ @Override
+ public void run() {
+ while (!stop) {
+ // TODO: Use System.nanoTime() when Lucene moves to Java SE 5.
+ counter.addAndGet(resolution);
+ try {
+ Thread.sleep( resolution );
+ } catch (InterruptedException ie) {
+ throw new ThreadInterruptedException(ie);
+ }
+ }
+ }
+
+ /**
+ * Get the timer value in milliseconds.
+ */
+ public long getMilliseconds() {
+ return time;
+ }
+
+ /**
+ * Stops the timer thread
+ */
+ public void stopTimer() {
+ stop = true;
+ }
+
+ /**
+ * Return the timer resolution.
+ * @see #setResolution(long)
+ */
+ public long getResolution() {
+ return resolution;
+ }
+
+ /**
+ * Set the timer resolution.
+ * The default timer resolution is 20 milliseconds.
+ * This means that a search required to take no longer than
+ * 800 milliseconds may be stopped after 780 to 820 milliseconds.
+ * <br>Note that:
+ * <ul>
+ * <li>Finer (smaller) resolution is more accurate but less efficient.</li>
+ * <li>Setting resolution to less than 5 milliseconds will be silently modified to 5 milliseconds.</li>
+ * <li>Setting resolution smaller than current resolution might take effect only after current
+ * resolution. (Assume current resolution of 20 milliseconds is modified to 5 milliseconds,
+ * then it can take up to 20 milliseconds for the change to have effect.</li>
+ * </ul>
+ */
+ public void setResolution(long resolution) {
+ this.resolution = Math.max(resolution, 5); // 5 milliseconds is about the minimum reasonable time for a Object.wait(long) call.
+ }
+ }
+
}
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java Mon Nov 14 22:36:20 2011
@@ -163,7 +163,6 @@ public abstract class TopTermsRewrite<Q
assert reader.docFreq(term) == st.termState.docFreq() : "reader DF is " + reader.docFreq(term) + " vs " + st.termState.docFreq() + " term=" + term;
addClause(q, term, st.termState.docFreq(), query.getBoost() * st.boost, st.termState); // add to query
}
- query.incTotalNumberOfTerms(scoreTerms.length);
return q;
}
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java Mon Nov 14 22:36:20 2011
@@ -150,7 +150,7 @@ public class PayloadNearQuery extends Sp
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
- return new PayloadNearSpanScorer(query.getSpans(context, acceptDocs), this,
+ return new PayloadNearSpanScorer(query.getSpans(context, acceptDocs, termContexts), this,
similarity, similarity.sloppyDocScorer(stats, query.getField(), context));
}
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java Mon Nov 14 22:36:20 2011
@@ -20,8 +20,11 @@ package org.apache.lucene.search.payload
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
@@ -41,6 +44,7 @@ import org.apache.lucene.search.spans.Sp
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util.TermContext;
/**
* Experimental class to get set of payloads for most standard Lucene queries.
@@ -174,9 +178,15 @@ public class PayloadSpanUtil {
private void getPayloads(Collection<byte []> payloads, SpanQuery query)
throws IOException {
+ Map<Term,TermContext> termContexts = new HashMap<Term,TermContext>();
+ TreeSet<Term> terms = new TreeSet<Term>();
+ query.extractTerms(terms);
+ for (Term term : terms) {
+ termContexts.put(term, TermContext.build(context, term, true));
+ }
final AtomicReaderContext[] leaves = ReaderUtil.leaves(context);
for (AtomicReaderContext atomicReaderContext : leaves) {
- final Spans spans = query.getSpans(atomicReaderContext, atomicReaderContext.reader.getLiveDocs());
+ final Spans spans = query.getSpans(atomicReaderContext, atomicReaderContext.reader.getLiveDocs(), termContexts);
while (spans.next() == true) {
if (spans.isPayloadAvailable()) {
Collection<byte[]> payload = spans.getPayload();
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java Mon Nov 14 22:36:20 2011
@@ -81,7 +81,7 @@ public class PayloadTermQuery extends Sp
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
- return new PayloadTermSpanScorer((TermSpans) query.getSpans(context, acceptDocs),
+ return new PayloadTermSpanScorer((TermSpans) query.getSpans(context, acceptDocs, termContexts),
this, similarity.sloppyDocScorer(stats, query.getField(), context));
}
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java Mon Nov 14 22:36:20 2011
@@ -18,6 +18,7 @@ package org.apache.lucene.search.spans;
*/
import java.io.IOException;
+import java.util.Map;
import java.util.Set;
import org.apache.lucene.index.IndexReader;
@@ -27,6 +28,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.TermContext;
import org.apache.lucene.util.ToStringUtils;
/**
@@ -93,8 +95,8 @@ public class FieldMaskingSpanQuery exten
// ...this is done to be more consistent with things like SpanFirstQuery
@Override
- public Spans getSpans(AtomicReaderContext context, Bits acceptDocs) throws IOException {
- return maskedQuery.getSpans(context, acceptDocs);
+ public Spans getSpans(AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
+ return maskedQuery.getSpans(context, acceptDocs, termContexts);
}
@Override
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java Mon Nov 14 22:36:20 2011
@@ -17,9 +17,11 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
+import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.TermContext;
import java.io.IOException;
import java.util.ArrayList;
@@ -28,6 +30,7 @@ import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Collection;
+import java.util.Map;
import java.util.Set;
/** A Spans that is formed from the ordered subspans of a SpanNearQuery
@@ -78,11 +81,11 @@ public class NearSpansOrdered extends Sp
private SpanNearQuery query;
private boolean collectPayloads = true;
- public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, Bits acceptDocs) throws IOException {
- this(spanNearQuery, context, acceptDocs, true);
+ public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
+ this(spanNearQuery, context, acceptDocs, termContexts, true);
}
- public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, Bits acceptDocs, boolean collectPayloads)
+ public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts, boolean collectPayloads)
throws IOException {
if (spanNearQuery.getClauses().length < 2) {
throw new IllegalArgumentException("Less than 2 clauses: "
@@ -95,7 +98,7 @@ public class NearSpansOrdered extends Sp
matchPayload = new LinkedList<byte[]>();
subSpansByDoc = new Spans[clauses.length];
for (int i = 0; i < clauses.length; i++) {
- subSpans[i] = clauses[i].getSpans(context, acceptDocs);
+ subSpans[i] = clauses[i].getSpans(context, acceptDocs, termContexts);
subSpansByDoc[i] = subSpans[i]; // used in toSameDoc()
}
query = spanNearQuery; // kept for toString() only.
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java Mon Nov 14 22:36:20 2011
@@ -17,14 +17,17 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
+import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.TermContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.HashSet;
@@ -132,7 +135,7 @@ public class NearSpansUnordered extends
}
- public NearSpansUnordered(SpanNearQuery query, AtomicReaderContext context, Bits acceptDocs)
+ public NearSpansUnordered(SpanNearQuery query, AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts)
throws IOException {
this.query = query;
this.slop = query.getSlop();
@@ -142,7 +145,7 @@ public class NearSpansUnordered extends
subSpans = new Spans[clauses.length];
for (int i = 0; i < clauses.length; i++) {
SpansCell cell =
- new SpansCell(clauses[i].getSpans(context, acceptDocs), i);
+ new SpansCell(clauses[i].getSpans(context, acceptDocs, termContexts), i);
ordered.add(cell);
subSpans[i] = cell.spans;
}