You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by bu...@apache.org on 2011/01/21 20:38:14 UTC
svn commit: r1061979 [6/7] - in /lucene/dev/branches/realtime_search: ./
lucene/ lucene/contrib/ lucene/contrib/ant/ lucene/contrib/db/
lucene/contrib/db/bdb-je/ lucene/contrib/db/bdb/ lucene/contrib/demo/
lucene/contrib/demo/src/java/org/apache/lucene...
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java Fri Jan 21 19:38:06 2011
@@ -17,6 +17,7 @@
package org.apache.solr.analysis;
import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.solr.common.SolrException;
import org.apache.solr.schema.DateField;
@@ -56,6 +57,9 @@ final class TrieTokenizer extends Tokeni
protected final int precisionStep;
protected final TrieTypes type;
protected final NumericTokenStream ts;
+
+ protected final OffsetAttribute ofsAtt = addAttribute(OffsetAttribute.class);
+ protected int startOfs, endOfs;
static NumericTokenStream getNumericTokenStream(int precisionStep) {
return new NumericTokenStream(precisionStep);
@@ -82,6 +86,8 @@ final class TrieTokenizer extends Tokeni
input = super.input;
char[] buf = new char[32];
int len = input.read(buf);
+ this.startOfs = correctOffset(0);
+ this.endOfs = correctOffset(len);
String v = new String(buf, 0, len);
switch (type) {
case INTEGER:
@@ -105,13 +111,32 @@ final class TrieTokenizer extends Tokeni
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to create TrieIndexTokenizer", e);
}
+ }
+ @Override
+ public void close() throws IOException {
+ super.close();
+ ts.close();
+ }
+
+ @Override
+ public void reset() throws IOException {
+ super.reset();
ts.reset();
}
-
@Override
public boolean incrementToken() throws IOException {
- return ts.incrementToken();
+ if (ts.incrementToken()) {
+ ofsAtt.setOffset(startOfs, endOfs);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public void end() throws IOException {
+ ts.end();
+ ofsAtt.setOffset(endOfs, endOfs);
}
}
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java Fri Jan 21 19:38:06 2011
@@ -20,10 +20,14 @@ package org.apache.solr.handler;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharReader;
import org.apache.lucene.analysis.CharStream;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.*;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.AttributeReflector;
+import org.apache.lucene.util.SorterTemplate;
import org.apache.solr.analysis.CharFilterFactory;
import org.apache.solr.analysis.TokenFilterFactory;
import org.apache.solr.analysis.TokenizerChain;
@@ -34,6 +38,9 @@ import org.apache.solr.common.SolrExcept
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.FieldType;
+import org.apache.solr.util.ByteUtils;
+
+import org.apache.noggit.CharArr;
import java.io.IOException;
import java.io.StringReader;
@@ -47,7 +54,7 @@ import java.util.*;
*/
public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase {
- public static final Set<String> EMPTY_STRING_SET = Collections.emptySet();
+ public static final Set<BytesRef> EMPTY_BYTES_SET = Collections.emptySet();
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
rsp.add("analysis", doAnalysis(req));
@@ -107,7 +114,7 @@ public abstract class AnalysisRequestHan
}
TokenStream tokenStream = tfac.create(tokenizerChain.charStream(new StringReader(value)));
- List<Token> tokens = analyzeTokenStream(tokenStream);
+ List<AttributeSource> tokens = analyzeTokenStream(tokenStream);
namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokens, context));
@@ -115,7 +122,7 @@ public abstract class AnalysisRequestHan
for (TokenFilterFactory tokenFilterFactory : filtfacs) {
tokenStream = tokenFilterFactory.create(listBasedTokenStream);
- List<Token> tokenList = analyzeTokenStream(tokenStream);
+ List<AttributeSource> tokenList = analyzeTokenStream(tokenStream);
namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokenList, context));
listBasedTokenStream = new ListBasedTokenStream(tokenList);
}
@@ -126,14 +133,24 @@ public abstract class AnalysisRequestHan
/**
* Analyzes the given text using the given analyzer and returns the produced tokens.
*
- * @param value The value to analyze.
+ * @param query The query to analyze.
* @param analyzer The analyzer to use.
- *
- * @return The produces token list.
*/
- protected List<Token> analyzeValue(String value, Analyzer analyzer) {
- TokenStream tokenStream = analyzer.tokenStream("", new StringReader(value));
- return analyzeTokenStream(tokenStream);
+ protected Set<BytesRef> getQueryTokenSet(String query, Analyzer analyzer) {
+ final Set<BytesRef> tokens = new HashSet<BytesRef>();
+ final TokenStream tokenStream = analyzer.tokenStream("", new StringReader(query));
+ final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
+ try {
+ tokenStream.reset();
+ while (tokenStream.incrementToken()) {
+ final BytesRef bytes = new BytesRef();
+ bytesAtt.toBytesRef(bytes);
+ tokens.add(bytes);
+ }
+ } catch (IOException ioe) {
+ throw new RuntimeException("Error occured while iterating over tokenstream", ioe);
+ }
+ return tokens;
}
/**
@@ -143,41 +160,17 @@ public abstract class AnalysisRequestHan
*
* @return List of tokens produced from the TokenStream
*/
- private List<Token> analyzeTokenStream(TokenStream tokenStream) {
- List<Token> tokens = new ArrayList<Token>();
-
- // TODO change this API to support custom attributes
- CharTermAttribute termAtt = null;
- TermToBytesRefAttribute bytesAtt = null;
- if (tokenStream.hasAttribute(CharTermAttribute.class)) {
- termAtt = tokenStream.getAttribute(CharTermAttribute.class);
- } else if (tokenStream.hasAttribute(TermToBytesRefAttribute.class)) {
- bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
- }
- final OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
- final TypeAttribute typeAtt = tokenStream.addAttribute(TypeAttribute.class);
- final PositionIncrementAttribute posIncAtt = tokenStream.addAttribute(PositionIncrementAttribute.class);
- final FlagsAttribute flagsAtt = tokenStream.addAttribute(FlagsAttribute.class);
- final PayloadAttribute payloadAtt = tokenStream.addAttribute(PayloadAttribute.class);
-
+ private List<AttributeSource> analyzeTokenStream(TokenStream tokenStream) {
+ List<AttributeSource> tokens = new ArrayList<AttributeSource>();
+ // for backwards compatibility, add all "common" attributes
+ tokenStream.addAttribute(PositionIncrementAttribute.class);
+ tokenStream.addAttribute(OffsetAttribute.class);
+ tokenStream.addAttribute(TypeAttribute.class);
final BytesRef bytes = new BytesRef();
try {
+ tokenStream.reset();
while (tokenStream.incrementToken()) {
- Token token = new Token();
- if (termAtt != null) {
- token.setEmpty().append(termAtt);
- }
- if (bytesAtt != null) {
- bytesAtt.toBytesRef(bytes);
- // TODO: This is incorrect when numeric fields change in later lucene versions. It should use BytesRef directly!
- token.setEmpty().append(bytes.utf8ToString());
- }
- token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
- token.setType(typeAtt.type());
- token.setFlags(flagsAtt.getFlags());
- token.setPayload(payloadAtt.getPayload());
- token.setPositionIncrement(posIncAtt.getPositionIncrement());
- tokens.add((Token) token.clone());
+ tokens.add(tokenStream.cloneAttributes());
}
} catch (IOException ioe) {
throw new RuntimeException("Error occured while iterating over tokenstream", ioe);
@@ -186,6 +179,13 @@ public abstract class AnalysisRequestHan
return tokens;
}
+ // a static mapping of the reflected attribute keys to the names used in Solr 1.4
+ static Map<String,String> ATTRIBUTE_MAPPING = Collections.unmodifiableMap(new HashMap<String,String>() {{
+ put(OffsetAttribute.class.getName() + "#startOffset", "start");
+ put(OffsetAttribute.class.getName() + "#endOffset", "end");
+ put(TypeAttribute.class.getName() + "#type", "type");
+ }});
+
/**
* Converts the list of Tokens to a list of NamedLists representing the tokens.
*
@@ -194,41 +194,100 @@ public abstract class AnalysisRequestHan
*
* @return List of NamedLists containing the relevant information taken from the tokens
*/
- private List<NamedList> convertTokensToNamedLists(List<Token> tokens, AnalysisContext context) {
- List<NamedList> tokensNamedLists = new ArrayList<NamedList>();
+ private List<NamedList> convertTokensToNamedLists(final List<AttributeSource> tokens, AnalysisContext context) {
+ final List<NamedList> tokensNamedLists = new ArrayList<NamedList>();
- Collections.sort(tokens, new Comparator<Token>() {
- public int compare(Token o1, Token o2) {
- return o1.endOffset() - o2.endOffset();
+ final int[] positions = new int[tokens.size()];
+ int position = 0;
+ for (int i = 0, c = tokens.size(); i < c; i++) {
+ AttributeSource token = tokens.get(i);
+ position += token.addAttribute(PositionIncrementAttribute.class).getPositionIncrement();
+ positions[i] = position;
+ }
+
+ // sort the tokens by absolute position
+ new SorterTemplate() {
+ @Override
+ protected void swap(int i, int j) {
+ final int p = positions[i];
+ positions[i] = positions[j];
+ positions[j] = p;
+ Collections.swap(tokens, i, j);
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return positions[i] - positions[j];
}
- });
- int position = 0;
+ @Override
+ protected void setPivot(int i) {
+ pivot = positions[i];
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ return pivot - positions[j];
+ }
+
+ private int pivot;
+ }.mergeSort(0, tokens.size() - 1);
FieldType fieldType = context.getFieldType();
- for (Token token : tokens) {
- NamedList<Object> tokenNamedList = new SimpleOrderedMap<Object>();
+ final BytesRef rawBytes = new BytesRef();
+ final CharArr textBuf = new CharArr();
+ for (int i = 0, c = tokens.size(); i < c; i++) {
+ AttributeSource token = tokens.get(i);
+ final NamedList<Object> tokenNamedList = new SimpleOrderedMap<Object>();
+ token.getAttribute(TermToBytesRefAttribute.class).toBytesRef(rawBytes);
+
+ textBuf.reset();
+ fieldType.indexedToReadable(rawBytes, textBuf);
+ final String text = textBuf.toString();
- String text = fieldType.indexedToReadable(token.toString());
tokenNamedList.add("text", text);
- if (!text.equals(token.toString())) {
- tokenNamedList.add("raw_text", token.toString());
+
+ if (token.hasAttribute(CharTermAttribute.class)) {
+ final String rawText = token.getAttribute(CharTermAttribute.class).toString();
+ if (!rawText.equals(text)) {
+ tokenNamedList.add("raw_text", rawText);
+ }
}
- tokenNamedList.add("type", token.type());
- tokenNamedList.add("start", token.startOffset());
- tokenNamedList.add("end", token.endOffset());
- position += token.getPositionIncrement();
- tokenNamedList.add("position", position);
+ tokenNamedList.add("raw_bytes", rawBytes.toString());
- if (context.getTermsToMatch().contains(token.toString())) {
+ if (context.getTermsToMatch().contains(rawBytes)) {
tokenNamedList.add("match", true);
}
- if (token.getPayload() != null) {
- tokenNamedList.add("payload", token.getPayload());
- }
+ tokenNamedList.add("position", positions[i]);
+
+ token.reflectWith(new AttributeReflector() {
+ public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
+ // leave out position and bytes term
+ if (TermToBytesRefAttribute.class.isAssignableFrom(attClass))
+ return;
+ if (CharTermAttribute.class.isAssignableFrom(attClass))
+ return;
+ if (PositionIncrementAttribute.class.isAssignableFrom(attClass))
+ return;
+
+ String k = attClass.getName() + '#' + key;
+
+ // map keys for "standard attributes":
+ if (ATTRIBUTE_MAPPING.containsKey(k)) {
+ k = ATTRIBUTE_MAPPING.get(k);
+ }
+
+ if (value instanceof Payload) {
+ final Payload p = (Payload) value;
+ value = new BytesRef(p.getData()).toString();
+ }
+
+ tokenNamedList.add(k, value);
+ }
+ });
tokensNamedLists.add(tokenNamedList);
}
@@ -261,38 +320,27 @@ public abstract class AnalysisRequestHan
*/
// TODO refactor to support custom attributes
protected final static class ListBasedTokenStream extends TokenStream {
- private final List<Token> tokens;
- private Iterator<Token> tokenIterator;
+ private final List<AttributeSource> tokens;
+ private Iterator<AttributeSource> tokenIterator;
- private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
- private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
- private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
- private final FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class);
- private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
- private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
/**
* Creates a new ListBasedTokenStream which uses the given tokens as its token source.
*
* @param tokens Source of tokens to be used
*/
- ListBasedTokenStream(List<Token> tokens) {
+ ListBasedTokenStream(List<AttributeSource> tokens) {
this.tokens = tokens;
tokenIterator = tokens.iterator();
}
- /**
- * {@inheritDoc}
- */
@Override
public boolean incrementToken() throws IOException {
if (tokenIterator.hasNext()) {
- Token next = tokenIterator.next();
- termAtt.copyBuffer(next.buffer(), 0, next.length());
- typeAtt.setType(next.type());
- offsetAtt.setOffset(next.startOffset(), next.endOffset());
- flagsAtt.setFlags(next.getFlags());
- payloadAtt.setPayload(next.getPayload());
- posIncAtt.setPositionIncrement(next.getPositionIncrement());
+ AttributeSource next = tokenIterator.next();
+ Iterator<Class<? extends Attribute>> atts = next.getAttributeClassesIterator();
+ while (atts.hasNext()) // make sure all att impls in the token exist here
+ addAttribute(atts.next());
+ next.copyTo(this);
return true;
} else {
return false;
@@ -314,7 +362,7 @@ public abstract class AnalysisRequestHan
private final String fieldName;
private final FieldType fieldType;
private final Analyzer analyzer;
- private final Set<String> termsToMatch;
+ private final Set<BytesRef> termsToMatch;
/**
* Constructs a new AnalysisContext with a given field type, analyzer and
@@ -328,7 +376,7 @@ public abstract class AnalysisRequestHan
* @param termsToMatch Holds all the terms that should match during the
* analysis process.
*/
- public AnalysisContext(FieldType fieldType, Analyzer analyzer, Set<String> termsToMatch) {
+ public AnalysisContext(FieldType fieldType, Analyzer analyzer, Set<BytesRef> termsToMatch) {
this(null, fieldType, analyzer, termsToMatch);
}
@@ -343,7 +391,7 @@ public abstract class AnalysisRequestHan
*
*/
public AnalysisContext(String fieldName, FieldType fieldType, Analyzer analyzer) {
- this(fieldName, fieldType, analyzer, EMPTY_STRING_SET);
+ this(fieldName, fieldType, analyzer, EMPTY_BYTES_SET);
}
/**
@@ -359,7 +407,7 @@ public abstract class AnalysisRequestHan
* @param termsToMatch Holds all the terms that should match during the
* analysis process.
*/
- public AnalysisContext(String fieldName, FieldType fieldType, Analyzer analyzer, Set<String> termsToMatch) {
+ public AnalysisContext(String fieldName, FieldType fieldType, Analyzer analyzer, Set<BytesRef> termsToMatch) {
this.fieldName = fieldName;
this.fieldType = fieldType;
this.analyzer = analyzer;
@@ -378,7 +426,7 @@ public abstract class AnalysisRequestHan
return analyzer;
}
- public Set<String> getTermsToMatch() {
+ public Set<BytesRef> getTermsToMatch() {
return termsToMatch;
}
}
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java Fri Jan 21 19:38:06 2011
@@ -19,7 +19,7 @@ package org.apache.solr.handler;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
+import org.apache.lucene.util.BytesRef;
import org.apache.solr.client.solrj.request.DocumentAnalysisRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
@@ -216,21 +216,20 @@ public class DocumentAnalysisRequestHand
FieldType fieldType = schema.getFieldType(name);
- Set<String> termsToMatch = new HashSet<String>();
- if (request.getQuery() != null && request.isShowMatch()) {
- try {
- List<Token> tokens = analyzeValue(request.getQuery(), fieldType.getQueryAnalyzer());
- for (Token token : tokens) {
- termsToMatch.add(token.toString());
- }
- } catch (Exception e) {
- // ignore analysis exceptions since we are applying arbitrary text to all fields
- }
+ final String queryValue = request.getQuery();
+ Set<BytesRef> termsToMatch;
+ try {
+ termsToMatch = (queryValue != null && request.isShowMatch())
+ ? getQueryTokenSet(queryValue, fieldType.getQueryAnalyzer())
+ : EMPTY_BYTES_SET;
+ } catch (Exception e) {
+ // ignore analysis exceptions since we are applying arbitrary text to all fields
+ termsToMatch = EMPTY_BYTES_SET;
}
if (request.getQuery() != null) {
try {
- AnalysisContext analysisContext = new AnalysisContext(fieldType, fieldType.getQueryAnalyzer(), EMPTY_STRING_SET);
+ AnalysisContext analysisContext = new AnalysisContext(fieldType, fieldType.getQueryAnalyzer(), EMPTY_BYTES_SET);
fieldTokens.add("query", analyzeValue(request.getQuery(), analysisContext));
} catch (Exception e) {
// ignore analysis exceptions since we are applying arbitrary text to all fields
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java Fri Jan 21 19:38:06 2011
@@ -17,7 +17,7 @@
package org.apache.solr.handler;
-import org.apache.lucene.analysis.Token;
+import org.apache.lucene.util.BytesRef;
import org.apache.solr.client.solrj.request.FieldAnalysisRequest;
import org.apache.solr.common.params.AnalysisParams;
import org.apache.solr.common.params.CommonParams;
@@ -30,10 +30,7 @@ import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.IndexSchema;
import org.apache.commons.io.IOUtils;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
import java.io.Reader;
import java.io.IOException;
@@ -222,14 +219,10 @@ public class FieldAnalysisRequestHandler
*/
private NamedList<NamedList> analyzeValues(FieldAnalysisRequest analysisRequest, FieldType fieldType, String fieldName) {
- Set<String> termsToMatch = new HashSet<String>();
- String queryValue = analysisRequest.getQuery();
- if (queryValue != null && analysisRequest.isShowMatch()) {
- List<Token> tokens = analyzeValue(queryValue, fieldType.getQueryAnalyzer());
- for (Token token : tokens) {
- termsToMatch.add(token.toString());
- }
- }
+ final String queryValue = analysisRequest.getQuery();
+ final Set<BytesRef> termsToMatch = (queryValue != null && analysisRequest.isShowMatch())
+ ? getQueryTokenSet(queryValue, fieldType.getQueryAnalyzer())
+ : EMPTY_BYTES_SET;
NamedList<NamedList> analyzeResults = new SimpleOrderedMap<NamedList>();
if (analysisRequest.getFieldValue() != null) {
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/component/QueryComponent.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/component/QueryComponent.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/component/QueryComponent.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/handler/component/QueryComponent.java Fri Jan 21 19:38:06 2011
@@ -443,7 +443,7 @@ public class QueryComponent extends Sear
// take the documents given and re-derive the sort values.
boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES,false);
if(fsv){
- Sort sort = rb.getSortSpec().getSort();
+ Sort sort = searcher.weightSort(rb.getSortSpec().getSort());
SortField[] sortFields = sort==null ? new SortField[]{SortField.FIELD_SCORE} : sort.getSort();
NamedList sortVals = new NamedList(); // order is important for the sort fields
Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO); // a dummy Field
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/request/SimpleFacets.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/request/SimpleFacets.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/request/SimpleFacets.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/request/SimpleFacets.java Fri Jan 21 19:38:06 2011
@@ -147,14 +147,17 @@ public class SimpleFacets {
List<Query> qlist = new ArrayList<Query>();
// add the base query
- qlist.add(rb.getQuery());
+ if (!excludeSet.containsKey(rb.getQuery())) {
+ qlist.add(rb.getQuery());
+ }
// add the filters
- for (Query q : rb.getFilters()) {
- if (!excludeSet.containsKey(q)) {
- qlist.add(q);
+ if (rb.getFilters() != null) {
+ for (Query q : rb.getFilters()) {
+ if (!excludeSet.containsKey(q)) {
+ qlist.add(q);
+ }
}
-
}
// get the new base docset for this facet
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/request/UnInvertedField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/request/UnInvertedField.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/request/UnInvertedField.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/request/UnInvertedField.java Fri Jan 21 19:38:06 2011
@@ -996,12 +996,12 @@ class NumberedTermsEnum extends TermsEnu
}
@Override
- public int docFreq() {
+ public int docFreq() throws IOException {
return tenum.docFreq();
}
@Override
- public long totalTermFreq() {
+ public long totalTermFreq() throws IOException {
return tenum.totalTermFreq();
}
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java Fri Jan 21 19:38:06 2011
@@ -32,7 +32,8 @@ import org.apache.solr.schema.SchemaFiel
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
-
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
/**
* A description of the PHP serialization format can be found here:
* http://www.hurring.com/scott/code/perl/serialize/
@@ -105,7 +106,8 @@ class PHPSerializedWriter extends JSONWr
@Override
public void writeDoc(String name, Collection<Fieldable> fields, Set<String> returnFields, Map pseudoFields) throws IOException {
ArrayList<Fieldable> single = new ArrayList<Fieldable>();
- HashMap<String, MultiValueField> multi = new HashMap<String, MultiValueField>();
+ LinkedHashMap<String, MultiValueField> multi
+ = new LinkedHashMap<String, MultiValueField>();
for (Fieldable ff : fields) {
String fname = ff.name();
@@ -201,6 +203,96 @@ class PHPSerializedWriter extends JSONWr
}
@Override
+ public void writeSolrDocument(String name, SolrDocument doc, Set<String> returnFields, Map pseudoFields) throws IOException {
+ LinkedHashMap <String,Object> single = new LinkedHashMap<String, Object>();
+ LinkedHashMap <String,Object> multi = new LinkedHashMap<String, Object>();
+ int pseudoSize = pseudoFields != null ? pseudoFields.size() : 0;
+
+ for (String fname : doc.getFieldNames()) {
+ if(returnFields != null && !returnFields.contains(fname)){
+ continue;
+ }
+
+ Object val = doc.getFieldValue(fname);
+ SchemaField sf = schema.getFieldOrNull(fname);
+ if (sf != null && sf.multiValued()) {
+ multi.put(fname, val);
+ }else{
+ single.put(fname, val);
+ }
+ }
+
+ writeMapOpener(single.size() + multi.size() + pseudoSize);
+ for(String fname: single.keySet()){
+ Object val = single.get(fname);
+ writeKey(fname, true);
+ writeVal(fname, val);
+ }
+
+ for(String fname: multi.keySet()){
+ writeKey(fname, true);
+
+ Object val = multi.get(fname);
+ if (!(val instanceof Collection)) {
+ // should never be reached if multivalued fields are stored as a Collection
+ // so I'm assuming a size of 1 just to wrap the single value
+ writeArrayOpener(1);
+ writeVal(fname, val);
+ writeArrayCloser();
+ }else{
+ writeVal(fname, val);
+ }
+ }
+
+ if (pseudoSize > 0) {
+ writeMap(null,pseudoFields,true, false);
+ }
+ writeMapCloser();
+ }
+
+
+ @Override
+ public void writeSolrDocumentList(String name, SolrDocumentList docs, Set<String> fields, Map otherFields) throws IOException {
+ boolean includeScore=false;
+ if (fields!=null) {
+ includeScore = fields.contains("score");
+ if (fields.size()==0 || (fields.size()==1 && includeScore) || fields.contains("*")) {
+ fields=null; // null means return all stored fields
+ }
+ }
+
+ int sz = docs.size();
+
+ writeMapOpener(includeScore ? 4 : 3);
+
+ writeKey("numFound",false);
+ writeLong(null,docs.getNumFound());
+
+ writeKey("start",false);
+ writeLong(null,docs.getStart());
+
+ if (includeScore && docs.getMaxScore() != null) {
+ writeKey("maxScore",false);
+ writeFloat(null,docs.getMaxScore());
+ }
+
+ writeKey("docs",false);
+
+ writeArrayOpener(sz);
+ for (int i=0; i<sz; i++) {
+ writeKey(i, false);
+ writeSolrDocument(null, docs.get(i), fields, otherFields);
+ }
+ writeArrayCloser();
+
+ if (otherFields !=null) {
+ writeMap(null, otherFields, true, false);
+ }
+ writeMapCloser();
+ }
+
+
+ @Override
public void writeArray(String name, Object[] val) throws IOException {
writeMapOpener(val.length);
for(int i=0; i < val.length; i++) {
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/schema/LatLonType.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/schema/LatLonType.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/schema/LatLonType.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/schema/LatLonType.java Fri Jan 21 19:38:06 2011
@@ -371,7 +371,7 @@ class SpatialDistanceQuery extends Query
@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
- return new SpatialScorer(getSimilarity(searcher), context, this);
+ return new SpatialScorer(context, this);
}
@Override
@@ -404,8 +404,8 @@ class SpatialDistanceQuery extends Query
int lastDistDoc;
double lastDist;
- public SpatialScorer(Similarity similarity, AtomicReaderContext readerContext, SpatialWeight w) throws IOException {
- super(similarity);
+ public SpatialScorer(AtomicReaderContext readerContext, SpatialWeight w) throws IOException {
+ super(w);
this.weight = w;
this.qWeight = w.getValue();
this.reader = readerContext.reader;
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/Grouping.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/Grouping.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/Grouping.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/Grouping.java Fri Jan 21 19:38:06 2011
@@ -162,7 +162,7 @@ public class Grouping {
// if we aren't going to return any groups, disregard the offset
if (numGroups == 0) maxGroupToFind = 0;
- collector = new TopGroupCollector(groupBy, context, normalizeSort(sort), maxGroupToFind);
+ collector = new TopGroupCollector(groupBy, context, searcher.weightSort(normalizeSort(sort)), maxGroupToFind);
/*** if we need a different algorithm when sort != group.sort
if (compareSorts(sort, groupSort)) {
@@ -185,9 +185,9 @@ public class Grouping {
int collectorOffset = format==Format.Simple ? 0 : offset;
if (groupBy instanceof StrFieldSource) {
- collector2 = new Phase2StringGroupCollector(collector, groupBy, context, groupSort, docsToCollect, needScores, collectorOffset);
+ collector2 = new Phase2StringGroupCollector(collector, groupBy, context, searcher.weightSort(groupSort), docsToCollect, needScores, collectorOffset);
} else {
- collector2 = new Phase2GroupCollector(collector, groupBy, context, groupSort, docsToCollect, needScores, collectorOffset);
+ collector2 = new Phase2GroupCollector(collector, groupBy, context, searcher.weightSort(groupSort), docsToCollect, needScores, collectorOffset);
}
return collector2;
}
@@ -306,11 +306,11 @@ public class Grouping {
return v;
}
- static TopDocsCollector newCollector(Sort sort, int numHits, boolean fillFields, boolean needScores) throws IOException {
+ TopDocsCollector newCollector(Sort sort, int numHits, boolean fillFields, boolean needScores) throws IOException {
if (sort==null || sort==byScoreDesc) {
return TopScoreDocCollector.create(numHits, true);
} else {
- return TopFieldCollector.create(sort, numHits, false, needScores, needScores, true);
+ return TopFieldCollector.create(searcher.weightSort(sort), numHits, false, needScores, needScores, true);
}
}
@@ -505,12 +505,12 @@ class TopGroupCollector extends GroupCol
int matches;
- public TopGroupCollector(ValueSource groupByVS, Map vsContext, Sort sort, int nGroups) throws IOException {
+ public TopGroupCollector(ValueSource groupByVS, Map vsContext, Sort weightedSort, int nGroups) throws IOException {
this.vs = groupByVS;
this.context = vsContext;
this.nGroups = nGroups = Math.max(1,nGroups); // we need a minimum of 1 for this collector
- SortField[] sortFields = sort.getSort();
+ SortField[] sortFields = weightedSort.getSort();
this.comparators = new FieldComparator[sortFields.length];
this.reversed = new int[sortFields.length];
for (int i = 0; i < sortFields.length; i++) {
@@ -719,7 +719,7 @@ class Phase2GroupCollector extends Colle
int docBase;
// TODO: may want to decouple from the phase1 collector
- public Phase2GroupCollector(TopGroupCollector topGroups, ValueSource groupByVS, Map vsContext, Sort sort, int docsPerGroup, boolean getScores, int offset) throws IOException {
+ public Phase2GroupCollector(TopGroupCollector topGroups, ValueSource groupByVS, Map vsContext, Sort weightedSort, int docsPerGroup, boolean getScores, int offset) throws IOException {
boolean getSortFields = false;
if (topGroups.orderedGroups == null)
@@ -733,10 +733,10 @@ class Phase2GroupCollector extends Colle
}
SearchGroupDocs groupDocs = new SearchGroupDocs();
groupDocs.groupValue = group.groupValue;
- if (sort==null)
+ if (weightedSort==null)
groupDocs.collector = TopScoreDocCollector.create(docsPerGroup, true);
else
- groupDocs.collector = TopFieldCollector.create(sort, docsPerGroup, getSortFields, getScores, getScores, true);
+ groupDocs.collector = TopFieldCollector.create(weightedSort, docsPerGroup, getSortFields, getScores, getScores, true);
groupMap.put(groupDocs.groupValue, groupDocs);
}
@@ -791,8 +791,8 @@ class Phase2StringGroupCollector extends
final SearchGroupDocs[] groups;
final BytesRef spare = new BytesRef();
- public Phase2StringGroupCollector(TopGroupCollector topGroups, ValueSource groupByVS, Map vsContext, Sort sort, int docsPerGroup, boolean getScores, int offset) throws IOException {
- super(topGroups, groupByVS, vsContext,sort,docsPerGroup,getScores,offset);
+ public Phase2StringGroupCollector(TopGroupCollector topGroups, ValueSource groupByVS, Map vsContext, Sort weightedSort, int docsPerGroup, boolean getScores, int offset) throws IOException {
+ super(topGroups, groupByVS, vsContext,weightedSort,docsPerGroup,getScores,offset);
ordSet = new SentinelIntSet(groupMap.size(), -1);
groups = new SearchGroupDocs[ordSet.keys.length];
}
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java Fri Jan 21 19:38:06 2011
@@ -61,7 +61,7 @@ public class SolrConstantScoreQuery exte
private Map context;
public ConstantWeight(IndexSearcher searcher) throws IOException {
- this.similarity = getSimilarity(searcher);
+ this.similarity = searcher.getSimilarity();
this.context = ValueSource.newContext(searcher);
if (filter instanceof SolrFilter)
((SolrFilter)filter).createWeight(context, searcher);
@@ -91,13 +91,13 @@ public class SolrConstantScoreQuery exte
@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
- return new ConstantScorer(similarity, context, this);
+ return new ConstantScorer(context, this);
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
- ConstantScorer cs = new ConstantScorer(similarity, context, this);
+ ConstantScorer cs = new ConstantScorer(context, this);
boolean exists = cs.docIdSetIterator.advance(doc) == doc;
ComplexExplanation result = new ComplexExplanation();
@@ -124,8 +124,8 @@ public class SolrConstantScoreQuery exte
final float theScore;
int doc = -1;
- public ConstantScorer(Similarity similarity, AtomicReaderContext context, ConstantWeight w) throws IOException {
- super(similarity);
+ public ConstantScorer(AtomicReaderContext context, ConstantWeight w) throws IOException {
+ super(w);
theScore = w.getValue();
DocIdSet docIdSet = filter instanceof SolrFilter ? ((SolrFilter)filter).getDocIdSet(w.context, context) : filter.getDocIdSet(context);
if (docIdSet == null) {
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java Fri Jan 21 19:38:06 2011
@@ -460,6 +460,30 @@ public class SolrIndexSearcher extends I
return fieldValueCache;
}
+ /** Returns a weighted sort according to this searcher */
+ public Sort weightSort(Sort sort) throws IOException {
+ if (sort == null) return null;
+ SortField[] sorts = sort.getSort();
+
+ boolean needsWeighting = false;
+ for (SortField sf : sorts) {
+ if (sf instanceof SolrSortField) {
+ needsWeighting = true;
+ break;
+ }
+ }
+ if (!needsWeighting) return sort;
+
+ SortField[] newSorts = Arrays.copyOf(sorts, sorts.length);
+ for (int i=0; i<newSorts.length; i++) {
+ if (newSorts[i] instanceof SolrSortField) {
+ newSorts[i] = ((SolrSortField)newSorts[i]).weight(this);
+ }
+ }
+
+ return new Sort(newSorts);
+ }
+
/**
* Returns the first document number containing the term <code>t</code>
@@ -1156,7 +1180,7 @@ public class SolrIndexSearcher extends I
if (cmd.getSort() == null) {
topCollector = TopScoreDocCollector.create(len, true);
} else {
- topCollector = TopFieldCollector.create(cmd.getSort(), len, false, needScores, needScores, true);
+ topCollector = TopFieldCollector.create(weightSort(cmd.getSort()), len, false, needScores, needScores, true);
}
Collector collector = topCollector;
if( timeAllowed > 0 ) {
@@ -1266,7 +1290,7 @@ public class SolrIndexSearcher extends I
if (cmd.getSort() == null) {
topCollector = TopScoreDocCollector.create(len, true);
} else {
- topCollector = TopFieldCollector.create(cmd.getSort(), len, false, needScores, needScores, true);
+ topCollector = TopFieldCollector.create(weightSort(cmd.getSort()), len, false, needScores, needScores, true);
}
DocSetCollector setCollector = new DocSetDelegateCollector(maxDoc>>6, maxDoc, topCollector);
@@ -1548,7 +1572,7 @@ public class SolrIndexSearcher extends I
// bit of a hack to tell if a set is sorted - do it better in the futute.
boolean inOrder = set instanceof BitDocSet || set instanceof SortedIntDocSet;
- TopDocsCollector topCollector = TopFieldCollector.create(sort, nDocs, false, false, false, inOrder);
+ TopDocsCollector topCollector = TopFieldCollector.create(weightSort(sort), nDocs, false, false, false, inOrder);
DocIterator iter = set.iterator();
int base=0;
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/BoostedQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/BoostedQuery.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/BoostedQuery.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/BoostedQuery.java Fri Jan 21 19:38:06 2011
@@ -96,7 +96,7 @@ public class BoostedQuery extends Query
if(subQueryScorer == null) {
return null;
}
- return new BoostedQuery.CustomScorer(getSimilarity(searcher), context, this, subQueryScorer, boostVal);
+ return new BoostedQuery.CustomScorer(context, this, subQueryScorer, boostVal);
}
@Override
@@ -123,9 +123,9 @@ public class BoostedQuery extends Query
private final DocValues vals;
private final AtomicReaderContext readerContext;
- private CustomScorer(Similarity similarity, AtomicReaderContext readerContext, BoostedQuery.BoostedWeight w,
+ private CustomScorer(AtomicReaderContext readerContext, BoostedQuery.BoostedWeight w,
Scorer scorer, ValueSource vs) throws IOException {
- super(similarity);
+ super(w);
this.weight = w;
this.qWeight = w.getValue();
this.scorer = scorer;
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/FunctionQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/FunctionQuery.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/FunctionQuery.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/FunctionQuery.java Fri Jan 21 19:38:06 2011
@@ -95,7 +95,7 @@ public class FunctionQuery extends Query
@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
- return new AllScorer(getSimilarity(searcher), context, this);
+ return new AllScorer(context, this);
}
@Override
@@ -114,8 +114,8 @@ public class FunctionQuery extends Query
final boolean hasDeletions;
final Bits delDocs;
- public AllScorer(Similarity similarity, AtomicReaderContext context, FunctionWeight w) throws IOException {
- super(similarity);
+ public AllScorer(AtomicReaderContext context, FunctionWeight w) throws IOException {
+ super(w);
this.weight = w;
this.qWeight = w.getValue();
this.reader = context.reader;
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/ValueSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/ValueSource.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/ValueSource.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/search/function/ValueSource.java Fri Jan 21 19:38:06 2011
@@ -26,12 +26,13 @@ import org.apache.lucene.search.IndexSea
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.Bits;
import org.apache.lucene.index.MultiFields;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.search.SolrSortField;
import java.io.IOException;
import java.io.Serializable;
import java.util.IdentityHashMap;
import java.util.Map;
-import java.util.Collections;
/**
* Instantiates {@link org.apache.solr.search.function.DocValues} for a particular reader.
@@ -61,24 +62,6 @@ public abstract class ValueSource implem
return description();
}
- /**
- * EXPERIMENTAL: This method is subject to change.
- * <br>WARNING: Sorted function queries are not currently weighted.
- * <p>
- * Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, AtomicReaderContext)}
- * to populate the SortField.
- *
- * @param reverse true if this is a reverse sort.
- * @return The {@link org.apache.lucene.search.SortField} for the ValueSource
- * @throws IOException if there was a problem reading the values.
- */
- public SortField getSortField(boolean reverse) throws IOException {
- //should we pass in the description for the field name?
- //Hmm, Lucene is going to intern whatever we pass in, not sure I like that
- //and we can't pass in null, either, as that throws an illegal arg. exception
- return new SortField(description(), new ValueSourceComparatorSource(), reverse);
- }
-
/**
* Implementations should propagate createWeight to sub-ValueSources which can optionally store
@@ -97,16 +80,56 @@ public abstract class ValueSource implem
return context;
}
- class ValueSourceComparatorSource extends FieldComparatorSource {
+ //
+ // Sorting by function
+ //
- public ValueSourceComparatorSource() {
+ /**
+ * EXPERIMENTAL: This method is subject to change.
+ * <br>WARNING: Sorted function queries are not currently weighted.
+ * <p>
+ * Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, AtomicReaderContext)}
+ * to populate the SortField.
+ *
+ * @param reverse true if this is a reverse sort.
+ * @return The {@link org.apache.lucene.search.SortField} for the ValueSource
+ * @throws IOException if there was a problem reading the values.
+ */
+ public SortField getSortField(boolean reverse) throws IOException {
+ return new ValueSourceSortField(reverse);
+ }
+
+ private static FieldComparatorSource dummyComparator = new FieldComparatorSource() {
+ @Override
+ public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unweighted use of sort " + fieldname);
+ }
+ };
+
+ class ValueSourceSortField extends SortField implements SolrSortField {
+ public ValueSourceSortField(boolean reverse) {
+ super(description(), dummyComparator, reverse);
+ }
+
+ @Override
+ public SortField weight(IndexSearcher searcher) throws IOException {
+ Map context = newContext(searcher);
+ createWeight(context, searcher);
+ return new SortField(getField(), new ValueSourceComparatorSource(context), getReverse());
+ }
+ }
+
+ class ValueSourceComparatorSource extends FieldComparatorSource {
+ private final Map context;
+ public ValueSourceComparatorSource(Map context) {
+ this.context = context;
}
public FieldComparator newComparator(String fieldname, int numHits,
int sortPos, boolean reversed) throws IOException {
- return new ValueSourceComparator(numHits);
+ return new ValueSourceComparator(context, numHits);
}
}
@@ -119,8 +142,10 @@ public abstract class ValueSource implem
private final double[] values;
private DocValues docVals;
private double bottom;
+ private Map fcontext;
- ValueSourceComparator(int numHits) {
+ ValueSourceComparator(Map fcontext, int numHits) {
+ this.fcontext = fcontext;
values = new double[numHits];
}
@@ -153,7 +178,7 @@ public abstract class ValueSource implem
}
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
- docVals = getValues(Collections.emptyMap(), context);
+ docVals = getValues(fcontext, context);
return this;
}
@@ -162,7 +187,7 @@ public abstract class ValueSource implem
}
public Comparable value(int slot) {
- return Double.valueOf(values[slot]);
+ return values[slot];
}
}
}
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java Fri Jan 21 19:38:06 2011
@@ -74,7 +74,6 @@ public class FileBasedSpellChecker exten
return null;
}
- @SuppressWarnings("unchecked")
private void loadExternalFileDictionary(SolrCore core) {
try {
@@ -92,7 +91,6 @@ public class FileBasedSpellChecker exten
new IndexWriterConfig(core.getSolrConfig().luceneMatchVersion, fieldType.getAnalyzer()).
setMaxBufferedDocs(150).
setMergePolicy(mp).
- setMaxFieldLength(IndexWriterConfig.UNLIMITED_FIELD_LENGTH).
setOpenMode(IndexWriterConfig.OpenMode.CREATE)
);
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/update/SolrIndexConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/update/SolrIndexConfig.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/update/SolrIndexConfig.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/update/SolrIndexConfig.java Fri Jan 21 19:38:06 2011
@@ -53,7 +53,6 @@ public class SolrIndexConfig {
maxMergeDocs = -1;
mergeFactor = -1;
ramBufferSizeMB = 16;
- maxFieldLength = -1;
writeLockTimeout = -1;
commitLockTimeout = -1;
lockType = null;
@@ -71,7 +70,6 @@ public class SolrIndexConfig {
public final double ramBufferSizeMB;
- public final int maxFieldLength;
public final int writeLockTimeout;
public final int commitLockTimeout;
public final String lockType;
@@ -95,7 +93,6 @@ public class SolrIndexConfig {
mergeFactor=solrConfig.getInt(prefix+"/mergeFactor",def.mergeFactor);
ramBufferSizeMB = solrConfig.getDouble(prefix+"/ramBufferSizeMB", def.ramBufferSizeMB);
- maxFieldLength=solrConfig.getInt(prefix+"/maxFieldLength",def.maxFieldLength);
writeLockTimeout=solrConfig.getInt(prefix+"/writeLockTimeout", def.writeLockTimeout);
commitLockTimeout=solrConfig.getInt(prefix+"/commitLockTimeout", def.commitLockTimeout);
lockType=solrConfig.get(prefix+"/lockType", def.lockType);
@@ -153,9 +150,6 @@ public class SolrIndexConfig {
if (termIndexInterval != -1)
iwc.setTermIndexInterval(termIndexInterval);
- if (maxFieldLength != -1)
- iwc.setMaxFieldLength(maxFieldLength);
-
if (writeLockTimeout != -1)
iwc.setWriteLockTimeout(writeLockTimeout);
Modified: lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/util/HighFrequencyDictionary.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/util/HighFrequencyDictionary.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/util/HighFrequencyDictionary.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/java/org/apache/solr/util/HighFrequencyDictionary.java Fri Jan 21 19:38:06 2011
@@ -76,7 +76,11 @@ public class HighFrequencyDictionary imp
}
public float freq() {
- return termsEnum.docFreq();
+ try {
+ return termsEnum.docFreq();
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe);
+ }
}
public String next() {
@@ -112,8 +116,12 @@ public class HighFrequencyDictionary imp
}
// got a valid term, does it pass the threshold?
- if (isFrequent(termsEnum.docFreq())) {
- return true;
+ try {
+ if (isFrequent(termsEnum.docFreq())) {
+ return true;
+ }
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe);
}
}
}
Modified: lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema-copyfield-test.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema-copyfield-test.xml?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema-copyfield-test.xml (original)
+++ lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema-copyfield-test.xml Fri Jan 21 19:38:06 2011
@@ -202,13 +202,14 @@
<fieldtype name="engporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="custengporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
- <filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="stopfilt" class="solr.TextField">
Modified: lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema-required-fields.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema-required-fields.xml?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema-required-fields.xml (original)
+++ lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema-required-fields.xml Fri Jan 21 19:38:06 2011
@@ -193,13 +193,14 @@
<fieldtype name="engporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="custengporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
- <filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="stopfilt" class="solr.TextField">
Modified: lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema.xml?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema.xml (original)
+++ lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema.xml Fri Jan 21 19:38:06 2011
@@ -236,13 +236,14 @@
<fieldtype name="engporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="custengporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
- <filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="stopfilt" class="solr.TextField">
Modified: lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema12.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema12.xml?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema12.xml (original)
+++ lucene/dev/branches/realtime_search/solr/src/test-files/solr/conf/schema12.xml Fri Jan 21 19:38:06 2011
@@ -252,13 +252,14 @@
<fieldtype name="engporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="custengporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
- <filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="stopfilt" class="solr.TextField">
@@ -286,14 +287,14 @@
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
@@ -303,14 +304,14 @@
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.StopFilterFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.StopFilterFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
@@ -375,7 +376,7 @@
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -384,7 +385,7 @@
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
@@ -397,7 +398,7 @@
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory"
synonyms="synonyms.txt" expand="true" />
- <filter class="solr.EnglishPorterFilterFactory"/>
+ <filter class="solr.PorterStemFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory" />
</analyzer>
</fieldtype>
Modified: lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/TestDistributedSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/TestDistributedSearch.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/TestDistributedSearch.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/TestDistributedSearch.java Fri Jan 21 19:38:06 2011
@@ -95,6 +95,7 @@ public class TestDistributedSearch exten
// these queries should be exactly ordered and scores should exactly match
query("q","*:*", "sort",i1+" desc");
+ query("q","*:*", "sort","{!func}add("+i1+",5)"+" desc");
query("q","*:*", "sort",i1+" asc");
query("q","*:*", "sort",i1+" desc", "fl","*,score");
query("q","*:*", "sort",tlong+" asc", "fl","score"); // test legacy behavior - "score"=="*,score"
Modified: lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/analysis/SnowballPorterFilterFactoryTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/analysis/SnowballPorterFilterFactoryTest.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/analysis/SnowballPorterFilterFactoryTest.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/analysis/SnowballPorterFilterFactoryTest.java Fri Jan 21 19:38:06 2011
@@ -33,7 +33,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.ArrayList;
-import java.util.Collections;
public class SnowballPorterFilterFactoryTest extends BaseTokenTestCase {
@@ -59,37 +58,6 @@ public class SnowballPorterFilterFactory
assertTokenStreamContents(stream, gold);
}
- /**
- * Tests the protected words mechanism of EnglishPorterFilterFactory
- */
- @Deprecated
- public void testProtectedOld() throws Exception {
- EnglishStemmer stemmer = new EnglishStemmer();
- String[] test = {"The", "fledgling", "banks", "were", "counting", "on", "a", "big", "boom", "in", "banking"};
- String[] gold = new String[test.length];
- for (int i = 0; i < test.length; i++) {
- if (test[i].equals("fledgling") == false && test[i].equals("banks") == false) {
- stemmer.setCurrent(test[i]);
- stemmer.stem();
- gold[i] = stemmer.getCurrent();
- } else {
- gold[i] = test[i];
- }
- }
-
- EnglishPorterFilterFactory factory = new EnglishPorterFilterFactory();
- Map<String, String> args = new HashMap<String, String>(DEFAULT_VERSION_PARAM);
- args.put(SnowballPorterFilterFactory.PROTECTED_TOKENS, "who-cares.txt");
- factory.init(args);
- List<String> lines = new ArrayList<String>();
- Collections.addAll(lines, "banks", "fledgling");
- factory.inform(new LinesMockSolrResourceLoader(lines));
- Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION,
- new StringReader(StrUtils.join(Arrays.asList(test), ' ')));
- TokenStream stream = factory.create(tokenizer);
- assertTokenStreamContents(stream, gold);
- }
-
class LinesMockSolrResourceLoader implements ResourceLoader {
List<String> lines;
Modified: lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/core/TestArbitraryIndexDir.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/core/TestArbitraryIndexDir.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/core/TestArbitraryIndexDir.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/core/TestArbitraryIndexDir.java Fri Jan 21 19:38:06 2011
@@ -99,8 +99,7 @@ public class TestArbitraryIndexDir exten
Directory dir = newFSDirectory(newDir);
IndexWriter iw = new IndexWriter(
dir,
- new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40)).
- setMaxFieldLength(1000)
+ new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40))
);
Document doc = new Document();
doc.add(new Field("id", "2", Field.Store.YES, Field.Index.ANALYZED));
Modified: lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/request/JSONWriterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/request/JSONWriterTest.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/request/JSONWriterTest.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/request/JSONWriterTest.java Fri Jan 21 19:38:06 2011
@@ -66,21 +66,6 @@ public class JSONWriterTest extends Solr
}
@Test
- public void testPHPS() throws IOException {
- SolrQueryRequest req = req("dummy");
- SolrQueryResponse rsp = new SolrQueryResponse();
- QueryResponseWriter w = new PHPSerializedResponseWriter();
-
- StringWriter buf = new StringWriter();
- rsp.add("data1", "hello");
- rsp.add("data2", 42);
- rsp.add("data3", true);
- w.write(buf, req, rsp);
- assertEquals(buf.toString(), "a:3:{s:5:\"data1\";s:5:\"hello\";s:5:\"data2\";i:42;s:5:\"data3\";b:1;}");
- req.close();
- }
-
- @Test
public void testJSON() throws IOException {
SolrQueryRequest req = req("wt","json","json.nl","arrarr");
SolrQueryResponse rsp = new SolrQueryResponse();
Modified: lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/request/SimpleFacetsTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/request/SimpleFacetsTest.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/request/SimpleFacetsTest.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/request/SimpleFacetsTest.java Fri Jan 21 19:38:06 2011
@@ -169,6 +169,16 @@ public class SimpleFacetsTest extends So
,"//lst[@name='trait_s']/int[@name='Pig'][.='1']"
);
+ // test excluding main query
+ assertQ(req("q", "{!tag=main}id:43"
+ ,"facet", "true"
+ ,"facet.query", "{!key=foo}id:42"
+ ,"facet.query", "{!ex=main key=bar}id:42" // only matches when we exclude main query
+ )
+ ,"//lst[@name='facet_queries']/int[@name='foo'][.='0']"
+ ,"//lst[@name='facet_queries']/int[@name='bar'][.='1']"
+ );
+
assertQ("check counts for applied facet queries using filtering (fq)",
req("q", "id:[42 TO 47]"
,"facet", "true"
Modified: lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/search/TestSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/search/TestSort.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/search/TestSort.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/search/TestSort.java Fri Jan 21 19:38:06 2011
@@ -63,8 +63,7 @@ public class TestSort extends AbstractSo
IndexWriter iw = new IndexWriter(
dir,
new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)).
- setOpenMode(IndexWriterConfig.OpenMode.CREATE).
- setMaxFieldLength(IndexWriterConfig.UNLIMITED_FIELD_LENGTH)
+ setOpenMode(IndexWriterConfig.OpenMode.CREATE)
);
final MyDoc[] mydocs = new MyDoc[ndocs];
Modified: lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java Fri Jan 21 19:38:06 2011
@@ -326,17 +326,18 @@ public class TestFunctionQuery extends S
assertU(adoc("id",""+i, "text","batman"));
}
assertU(commit());
- assertU(adoc("id","120", "text","batman superman")); // in a segment by itself
+ assertU(adoc("id","120", "text","batman superman")); // in a smaller segment
+ assertU(adoc("id","121", "text","superman"));
assertU(commit());
- // batman and superman have the same idf in single-doc segment, but very different in the complete index.
+ // superman has a higher df (thus lower idf) in one segment, but reversed in the complete index
String q ="{!func}query($qq)";
String fq="id:120";
assertQ(req("fl","*,score","q", q, "qq","text:batman", "fq",fq), "//float[@name='score']<'1.0'");
assertQ(req("fl","*,score","q", q, "qq","text:superman", "fq",fq), "//float[@name='score']>'1.0'");
// test weighting through a function range query
- assertQ(req("fl","*,score", "q", "{!frange l=1 u=10}query($qq)", "qq","text:superman"), "//*[@numFound='1']");
+ assertQ(req("fl","*,score", "fq",fq, "q", "{!frange l=1 u=10}query($qq)", "qq","text:superman"), "//*[@numFound='1']");
// test weighting through a complex function
q ="{!func}sub(div(sum(0.0,product(1,query($qq))),1),0)";
@@ -360,6 +361,14 @@ public class TestFunctionQuery extends S
// OK
}
+ // test that sorting by function weights correctly. superman should sort higher than batman due to idf of the whole index
+
+ assertQ(req("q", "*:*", "fq","id:120 OR id:121", "sort","{!func v=$sortfunc} desc", "sortfunc","query($qq)", "qq","text:(batman OR superman)")
+ ,"*//doc[1]/float[.='120.0']"
+ ,"*//doc[2]/float[.='121.0']"
+ );
+
+
purgeFieldCache(FieldCache.DEFAULT); // avoid FC insanity
}
Modified: lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java?rev=1061979&r1=1061978&r2=1061979&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java (original)
+++ lucene/dev/branches/realtime_search/solr/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java Fri Jan 21 19:38:06 2011
@@ -284,8 +284,7 @@ public class IndexBasedSpellCheckerTest
Directory dir = newFSDirectory(altIndexDir);
IndexWriter iw = new IndexWriter(
dir,
- new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).
- setMaxFieldLength(IndexWriterConfig.UNLIMITED_FIELD_LENGTH)
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
);
for (int i = 0; i < ALT_DOCS.length; i++) {
Document doc = new Document();