Posted to commits@lucene.apache.org by rm...@apache.org on 2013/10/21 20:58:44 UTC

svn commit: r1534320 [31/39] - in /lucene/dev/branches/lucene4956: ./ dev-tools/ dev-tools/idea/.idea/ dev-tools/idea/lucene/expressions/ dev-tools/idea/solr/contrib/velocity/ dev-tools/maven/ dev-tools/maven/lucene/ dev-tools/maven/lucene/expressions/...

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java Mon Oct 21 18:58:24 2013
@@ -44,6 +44,7 @@ import org.apache.solr.common.SolrExcept
 import org.apache.solr.common.params.QueryElevationParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.search.grouping.GroupingSpecification;
 import org.apache.solr.util.DOMUtil;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
@@ -343,16 +344,16 @@ public class QueryElevationComponent ext
       return query;
     }
     StringBuilder norm = new StringBuilder();
-    TokenStream tokens = analyzer.tokenStream("", query);
-    tokens.reset();
+    try (TokenStream tokens = analyzer.tokenStream("", query)) {
+      tokens.reset();
 
-    CharTermAttribute termAtt = tokens.addAttribute(CharTermAttribute.class);
-    while (tokens.incrementToken()) {
-      norm.append(termAtt.buffer(), 0, termAtt.length());
-    }
-    tokens.end();
-    tokens.close();
-    return norm.toString();
+      CharTermAttribute termAtt = tokens.addAttribute(CharTermAttribute.class);
+      while (tokens.incrementToken()) {
+        norm.append(termAtt.buffer(), 0, termAtt.length());
+      }
+      tokens.end();
+      return norm.toString();
+    }
   }
 
   //---------------------------------------------------------------------------------
@@ -424,23 +425,25 @@ public class QueryElevationComponent ext
         }));
       } else {
         // Check if the sort is based on score
-        boolean modify = false;
         SortField[] current = sortSpec.getSort().getSort();
-        ArrayList<SortField> sorts = new ArrayList<SortField>(current.length + 1);
-        // Perhaps force it to always sort by score
-        if (force && current[0].getType() != SortField.Type.SCORE) {
-          sorts.add(new SortField("_elevate_", comparator, true));
-          modify = true;
-        }
-        for (SortField sf : current) {
-          if (sf.getType() == SortField.Type.SCORE) {
-            sorts.add(new SortField("_elevate_", comparator, !sf.getReverse()));
-            modify = true;
-          }
-          sorts.add(sf);
+        Sort modified = this.modifySort(current, force, comparator);
+        if(modified != null) {
+          sortSpec.setSort(modified);
         }
-        if (modify) {
-          sortSpec.setSort(new Sort(sorts.toArray(new SortField[sorts.size()])));
+      }
+
+      // alter the sorting in the grouping specification if there is one
+      GroupingSpecification groupingSpec = rb.getGroupingSpec();
+      if(groupingSpec != null) {
+        SortField[] groupSort = groupingSpec.getGroupSort().getSort();
+        Sort modGroupSort = this.modifySort(groupSort, force, comparator);
+        if(modGroupSort != null) {
+          groupingSpec.setGroupSort(modGroupSort);
+        }
+        SortField[] withinGroupSort = groupingSpec.getSortWithinGroup().getSort();
+        Sort modWithinGroupSort = this.modifySort(withinGroupSort, force, comparator);
+        if(modWithinGroupSort != null) {
+          groupingSpec.setSortWithinGroup(modWithinGroupSort);
         }
       }
     }
@@ -466,6 +469,25 @@ public class QueryElevationComponent ext
     }
   }
 
+  private Sort modifySort(SortField[] current, boolean force, ElevationComparatorSource comparator) {
+    boolean modify = false;
+    ArrayList<SortField> sorts = new ArrayList<SortField>(current.length + 1);
+    // Perhaps force it to always sort by score
+    if (force && current[0].getType() != SortField.Type.SCORE) {
+      sorts.add(new SortField("_elevate_", comparator, true));
+      modify = true;
+    }
+    for (SortField sf : current) {
+      if (sf.getType() == SortField.Type.SCORE) {
+        sorts.add(new SortField("_elevate_", comparator, !sf.getReverse()));
+        modify = true;
+      }
+      sorts.add(sf);
+    }
+
+    return modify ? new Sort(sorts.toArray(new SortField[sorts.size()])) : null;
+  }
+
   @Override
   public void process(ResponseBuilder rb) throws IOException {
     // Do nothing -- the real work is modifying the input query

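The hunk above switches TokenStream consumption to try-with-resources so the stream is closed even when incrementToken() throws. Below is a minimal, self-contained sketch of that pattern; it is not part of the commit, and the KeywordAnalyzer and empty field name are illustrative stand-ins.

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TokenStreamSketch {
  // Concatenates every term the analyzer produces for the given text.
  static String normalize(Analyzer analyzer, String text) throws IOException {
    StringBuilder norm = new StringBuilder();
    try (TokenStream tokens = analyzer.tokenStream("", text)) { // auto-closed on exit
      tokens.reset();                                           // must precede incrementToken()
      CharTermAttribute termAtt = tokens.addAttribute(CharTermAttribute.class);
      while (tokens.incrementToken()) {
        norm.append(termAtt.buffer(), 0, termAtt.length());
      }
      tokens.end();                                             // record final state before close()
      return norm.toString();
    }
  }

  public static void main(String[] args) throws IOException {
    // KeywordAnalyzer (illustrative) emits the whole input as a single token.
    System.out.println(normalize(new KeywordAnalyzer(), "Hello World"));
  }
}
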
Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java Mon Oct 21 18:58:24 2013
@@ -463,29 +463,29 @@ public class SpellCheckComponent extends
   private Collection<Token> getTokens(String q, Analyzer analyzer) throws IOException {
     Collection<Token> result = new ArrayList<Token>();
     assert analyzer != null;
-    TokenStream ts = analyzer.tokenStream("", q);
-    ts.reset();
-    // TODO: support custom attributes
-    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
-    OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
-    TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
-    FlagsAttribute flagsAtt = ts.addAttribute(FlagsAttribute.class);
-    PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
-    PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
-    
-    while (ts.incrementToken()){
-      Token token = new Token();
-      token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
-      token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
-      token.setType(typeAtt.type());
-      token.setFlags(flagsAtt.getFlags());
-      token.setPayload(payloadAtt.getPayload());
-      token.setPositionIncrement(posIncAtt.getPositionIncrement());
-      result.add(token);
+    try (TokenStream ts = analyzer.tokenStream("", q)) {
+      ts.reset();
+      // TODO: support custom attributes
+      CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+      OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
+      TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
+      FlagsAttribute flagsAtt = ts.addAttribute(FlagsAttribute.class);
+      PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
+      PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
+      
+      while (ts.incrementToken()){
+        Token token = new Token();
+        token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
+        token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
+        token.setType(typeAtt.type());
+        token.setFlags(flagsAtt.getFlags());
+        token.setPayload(payloadAtt.getPayload());
+        token.setPositionIncrement(posIncAtt.getPositionIncrement());
+        result.add(token);
+      }
+      ts.end();
+      return result;
     }
-    ts.end();
-    ts.close();
-    return result;
   }
 
   protected SolrSpellChecker getSpellChecker(SolrParams params) {

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java Mon Oct 21 18:58:24 2013
@@ -37,6 +37,9 @@ import org.slf4j.LoggerFactory;
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
 
 /**
  * Update handler which uses the JavaBin format
@@ -97,7 +100,7 @@ public class JavabinLoader extends Conte
       } catch (EOFException e) {
         break; // this is expected
       }
-      if (update.getDeleteById() != null || update.getDeleteQuery() != null) {
+      if (update.getDeleteByIdMap() != null || update.getDeleteQuery() != null) {
         delete(req, update, processor);
       }
     }
@@ -118,9 +121,17 @@ public class JavabinLoader extends Conte
       delcmd.commitWithin = params.getInt(UpdateParams.COMMIT_WITHIN, -1);
     }
     
-    if(update.getDeleteById() != null) {
-      for (String s : update.getDeleteById()) {
-        delcmd.id = s;
+    if(update.getDeleteByIdMap() != null) {
+      Set<Entry<String,Map<String,Object>>> entries = update.getDeleteByIdMap().entrySet();
+      for (Entry<String,Map<String,Object>> e : entries) {
+        delcmd.id = e.getKey();
+        Map<String,Object> map = e.getValue();
+        if (map != null) {
+          Long version = (Long) map.get("ver");
+          if (version != null) {
+            delcmd.setVersion(version);
+          }
+        }
         processor.processDelete(delcmd);
         delcmd.clear();
       }

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java Mon Oct 21 18:58:24 2013
@@ -691,6 +691,11 @@ final class TokenOrderingFilter extends 
       return true;
     }
   }
+
+  @Override
+  public void reset() throws IOException {
+    // this looks wrong: but it's correct.
+  }
 }
 
 // for TokenOrderingFilter, so it can easily sort by startOffset

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/logging/LogWatcher.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/logging/LogWatcher.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/logging/LogWatcher.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/logging/LogWatcher.java Mon Oct 21 18:58:24 2013
@@ -126,8 +126,10 @@ public abstract class LogWatcher<E> {
    */
   public static LogWatcher newRegisteredLogWatcher(LogWatcherConfig config, SolrResourceLoader loader) {
 
-    if (!config.isEnabled())
+    if (!config.isEnabled()) {
+      log.info("A LogWatcher is not enabled");
       return null;
+    }
 
     LogWatcher logWatcher = createWatcher(config, loader);
 
@@ -148,6 +150,7 @@ public abstract class LogWatcher<E> {
 
     try {
       slf4jImpl = StaticLoggerBinder.getSingleton().getLoggerFactoryClassStr();
+      log.info("SLF4J impl is " + slf4jImpl);
       if (fname == null) {
         if (slf4jImpl.indexOf("Log4j") > 0) {
           fname = "Log4j";

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/logging/log4j/Log4jWatcher.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/logging/log4j/Log4jWatcher.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/logging/log4j/Log4jWatcher.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/logging/log4j/Log4jWatcher.java Mon Oct 21 18:58:24 2013
@@ -152,7 +152,7 @@ public class Log4jWatcher extends LogWat
     doc.setField("time", new Date(event.getTimeStamp()));
     doc.setField("level", event.getLevel().toString());
     doc.setField("logger", event.getLogger().getName());
-    doc.setField("message", event.getMessage().toString());
+    doc.setField("message", event.getRenderedMessage());
     ThrowableInformation t = event.getThrowableInformation();
     if(t!=null) {
       doc.setField("trace", Throwables.getStackTraceAsString(t.getThrowable()));

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java Mon Oct 21 18:58:24 2013
@@ -18,10 +18,6 @@
 package org.apache.solr.parser;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.CachingTokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.AutomatonQuery;
@@ -35,9 +31,8 @@ import org.apache.lucene.search.PhraseQu
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.RegexpQuery;
-import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.QueryBuilder;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.util.automaton.Automaton;
@@ -55,9 +50,7 @@ import org.apache.solr.schema.TextField;
 import org.apache.solr.search.QParser;
 import org.apache.solr.search.SyntaxError;
 
-import java.io.IOException;
 import java.io.StringReader;
-import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
@@ -66,7 +59,7 @@ import java.util.Map;
 /** This class is overridden by QueryParser in QueryParser.jj
  * and acts to separate the majority of the Java code from the .jj grammar file. 
  */
-public abstract class SolrQueryParserBase {
+public abstract class SolrQueryParserBase extends QueryBuilder {
 
 
   static final int CONJ_NONE   = 0;
@@ -89,7 +82,6 @@ public abstract class SolrQueryParserBas
 
   MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
   boolean allowLeadingWildcard = true;
-  boolean enablePositionIncrements = true;
 
   String defaultField;
   int phraseSlop = 0;     // default slop for phrase queries
@@ -101,7 +93,6 @@ public abstract class SolrQueryParserBas
 
   protected IndexSchema schema;
   protected QParser parser;
-  protected Analyzer analyzer;
 
   // implementation detail - caching ReversedWildcardFilterFactory based on type
   private Map<FieldType, ReversedWildcardFilterFactory> leadingWildcards;
@@ -137,6 +128,7 @@ public abstract class SolrQueryParserBas
 
   // So the generated QueryParser(CharStream) won't error out
   protected SolrQueryParserBase() {
+    super(null);
   }
   // the generated parser will create these in QueryParser
   public abstract void ReInit(CharStream stream);
@@ -147,7 +139,7 @@ public abstract class SolrQueryParserBas
     this.schema = parser.getReq().getSchema();
     this.parser = parser;
     this.defaultField = defaultField;
-    this.analyzer = schema.getQueryAnalyzer();
+    setAnalyzer(schema.getQueryAnalyzer());
   }
 
     /** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
@@ -282,27 +274,6 @@ public abstract class SolrQueryParserBas
   }
 
   /**
-   * Set to <code>true</code> to enable position increments in result query.
-   * <p>
-   * When set, result phrase and multi-phrase queries will
-   * be aware of position increments.
-   * Useful when e.g. a StopFilter increases the position increment of
-   * the token that follows an omitted token.
-   * <p>
-   * Default: true.
-   */
-  public void setEnablePositionIncrements(boolean enable) {
-    this.enablePositionIncrements = enable;
-  }
-
-  /**
-   * @see #setEnablePositionIncrements(boolean)
-   */
-  public boolean getEnablePositionIncrements() {
-    return enablePositionIncrements;
-  }
-
-  /**
    * Sets the boolean operator of the QueryParser.
    * In default mode (<code>OR_OPERATOR</code>) terms without any modifiers
    * are considered optional: for example <code>capital of Hungary</code> is equal to
@@ -400,165 +371,8 @@ public abstract class SolrQueryParserBas
 
 
   protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, boolean quoted)  throws SyntaxError {
-    // Use the analyzer to get all the tokens, and then build a TermQuery,
-    // PhraseQuery, or nothing based on the term count
-
-    TokenStream source;
-    try {
-      source = analyzer.tokenStream(field, queryText);
-      source.reset();
-    } catch (IOException e) {
-      throw new SyntaxError("Unable to initialize TokenStream to analyze query text", e);
-    }
-    CachingTokenFilter buffer = new CachingTokenFilter(source);
-    TermToBytesRefAttribute termAtt = null;
-    PositionIncrementAttribute posIncrAtt = null;
-    int numTokens = 0;
-
-    buffer.reset();
-
-    if (buffer.hasAttribute(TermToBytesRefAttribute.class)) {
-      termAtt = buffer.getAttribute(TermToBytesRefAttribute.class);
-    }
-    if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
-      posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
-    }
-
-    int positionCount = 0;
-    boolean severalTokensAtSamePosition = false;
-
-    boolean hasMoreTokens = false;
-    if (termAtt != null) {
-      try {
-        hasMoreTokens = buffer.incrementToken();
-        while (hasMoreTokens) {
-          numTokens++;
-          int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
-          if (positionIncrement != 0) {
-            positionCount += positionIncrement;
-          } else {
-            severalTokensAtSamePosition = true;
-          }
-          hasMoreTokens = buffer.incrementToken();
-        }
-      } catch (IOException e) {
-        // ignore
-      }
-    }
-    try {
-      // rewind the buffer stream
-      buffer.reset();
-
-      // close original stream - all tokens buffered
-      source.close();
-    }
-    catch (IOException e) {
-      throw new SyntaxError("Cannot close TokenStream analyzing query text", e);
-    }
-
-    BytesRef bytes = termAtt == null ? null : termAtt.getBytesRef();
-
-    if (numTokens == 0)
-      return null;
-    else if (numTokens == 1) {
-      try {
-        boolean hasNext = buffer.incrementToken();
-        assert hasNext == true;
-        termAtt.fillBytesRef();
-      } catch (IOException e) {
-        // safe to ignore, because we know the number of tokens
-      }
-      return newTermQuery(new Term(field, BytesRef.deepCopyOf(bytes)));
-    } else {
-      if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries)) {
-        if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries)) {
-          // no phrase query:
-          BooleanQuery q = newBooleanQuery(positionCount == 1);
-
-          BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
-            BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
-
-          for (int i = 0; i < numTokens; i++) {
-            try {
-              boolean hasNext = buffer.incrementToken();
-              assert hasNext == true;
-              termAtt.fillBytesRef();
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
-            }
-            Query currentQuery = newTermQuery(
-                new Term(field, BytesRef.deepCopyOf(bytes)));
-            q.add(currentQuery, occur);
-          }
-          return q;
-        }
-        else {
-          // phrase query:
-          MultiPhraseQuery mpq = newMultiPhraseQuery();
-          mpq.setSlop(phraseSlop);
-          List<Term> multiTerms = new ArrayList<Term>();
-          int position = -1;
-          for (int i = 0; i < numTokens; i++) {
-            int positionIncrement = 1;
-            try {
-              boolean hasNext = buffer.incrementToken();
-              assert hasNext == true;
-              termAtt.fillBytesRef();
-              if (posIncrAtt != null) {
-                positionIncrement = posIncrAtt.getPositionIncrement();
-              }
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
-            }
-
-            if (positionIncrement > 0 && multiTerms.size() > 0) {
-              if (enablePositionIncrements) {
-                mpq.add(multiTerms.toArray(new Term[0]),position);
-              } else {
-                mpq.add(multiTerms.toArray(new Term[0]));
-              }
-              multiTerms.clear();
-            }
-            position += positionIncrement;
-            multiTerms.add(new Term(field, BytesRef.deepCopyOf(bytes)));
-          }
-          if (enablePositionIncrements) {
-            mpq.add(multiTerms.toArray(new Term[0]),position);
-          } else {
-            mpq.add(multiTerms.toArray(new Term[0]));
-          }
-          return mpq;
-        }
-      }
-      else {
-        PhraseQuery pq = newPhraseQuery();
-        pq.setSlop(phraseSlop);
-        int position = -1;
-
-        for (int i = 0; i < numTokens; i++) {
-          int positionIncrement = 1;
-
-          try {
-            boolean hasNext = buffer.incrementToken();
-            assert hasNext == true;
-            termAtt.fillBytesRef();
-            if (posIncrAtt != null) {
-              positionIncrement = posIncrAtt.getPositionIncrement();
-            }
-          } catch (IOException e) {
-            // safe to ignore, because we know the number of tokens
-          }
-
-          if (enablePositionIncrements) {
-            position += positionIncrement;
-            pq.add(new Term(field, BytesRef.deepCopyOf(bytes)),position);
-          } else {
-            pq.add(new Term(field, BytesRef.deepCopyOf(bytes)));
-          }
-        }
-        return pq;
-      }
-    }
+    BooleanClause.Occur occur = operator == Operator.AND ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
+    return createFieldQuery(analyzer, occur, field, queryText, quoted || autoGeneratePhraseQueries, phraseSlop);
   }
 
 
@@ -589,16 +403,6 @@ public abstract class SolrQueryParserBas
     return query;
   }
 
-
- /**
-  * Builds a new BooleanQuery instance
-  * @param disableCoord disable coord
-  * @return new BooleanQuery instance
-  */
-  protected BooleanQuery newBooleanQuery(boolean disableCoord) {
-    return new BooleanQuery(disableCoord);
-  }
-
  /**
   * Builds a new BooleanClause instance
   * @param q sub query
@@ -610,31 +414,6 @@ public abstract class SolrQueryParserBas
   }
 
   /**
-   * Builds a new TermQuery instance
-   * @param term term
-   * @return new TermQuery instance
-   */
-  protected Query newTermQuery(Term term){
-    return new TermQuery(term);
-  }
-
-  /**
-   * Builds a new PhraseQuery instance
-   * @return new PhraseQuery instance
-   */
-  protected PhraseQuery newPhraseQuery(){
-    return new PhraseQuery();
-  }
-
-  /**
-   * Builds a new MultiPhraseQuery instance
-   * @return new MultiPhraseQuery instance
-   */
-  protected MultiPhraseQuery newMultiPhraseQuery(){
-    return new MultiPhraseQuery();
-  }
-
-  /**
    * Builds a new PrefixQuery instance
    * @param prefix Prefix term
    * @return new PrefixQuery instance
@@ -956,14 +735,14 @@ public abstract class SolrQueryParserBas
       FieldType ft = sf.getType();
       // delegate to type for everything except tokenized fields
       if (ft.isTokenized() && sf.indexed()) {
-        return newFieldQuery(analyzer, field, queryText, quoted || (ft instanceof TextField && ((TextField)ft).getAutoGeneratePhraseQueries()));
+        return newFieldQuery(getAnalyzer(), field, queryText, quoted || (ft instanceof TextField && ((TextField)ft).getAutoGeneratePhraseQueries()));
       } else {
         return sf.getType().getFieldQuery(parser, sf, queryText);
       }
     }
 
     // default to a normal field query
-    return newFieldQuery(analyzer, field, queryText, quoted);
+    return newFieldQuery(getAnalyzer(), field, queryText, quoted);
   }
 
 

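The parser rewrite above makes SolrQueryParserBase extend Lucene's QueryBuilder and delegate token-by-token query construction to it. The sketch below is not part of the commit; it exercises QueryBuilder's public entry points (createBooleanQuery/createPhraseQuery) rather than the protected createFieldQuery the parser calls internally, and the StandardAnalyzer and Version constant are illustrative.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.QueryBuilder;
import org.apache.lucene.util.Version;

public class QueryBuilderSketch {
  public static void main(String[] args) {
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_45);
    QueryBuilder builder = new QueryBuilder(analyzer);

    // Term or BooleanQuery, depending on how many tokens the analyzer produces.
    Query bq = builder.createBooleanQuery("body", "capital of Hungary", BooleanClause.Occur.SHOULD);

    // PhraseQuery (or MultiPhraseQuery when tokens share a position), with slop.
    Query pq = builder.createPhraseQuery("body", "capital of Hungary", 2);

    System.out.println(bq);
    System.out.println(pq);
  }
}
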
Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java Mon Oct 21 18:58:24 2013
@@ -218,11 +218,7 @@ public class DocValuesFacets {
   static NamedList<Integer> finalize(NamedList<Integer> res, SolrIndexSearcher searcher, SchemaField schemaField, DocSet docs, int missingCount, boolean missing) throws IOException {
     if (missing) {
       if (missingCount < 0) {
-        if (schemaField.multiValued()) {
-          missingCount = SimpleFacets.getFieldMissingCount(searcher,docs,schemaField.getName());
-        } else {
-          missingCount = 0; // single-valued dv is implicitly 0
-        }
+        missingCount = SimpleFacets.getFieldMissingCount(searcher,docs,schemaField.getName());
       }
       res.add(null, missingCount);
     }
@@ -231,12 +227,12 @@ public class DocValuesFacets {
   }
   
   /** accumulates per-segment single-valued facet counts, mapping to global ordinal space */
-  // specialized since the single-valued case is simpler: you don't have to deal with missing count, etc
+  // specialized since the single-valued case is different
   static void accumSingle(int counts[], int startTermIndex, SortedDocValues si, DocIdSetIterator disi, int subIndex, OrdinalMap map) throws IOException {
     int doc;
     while ((doc = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
       int term = si.getOrd(doc);
-      if (map != null) {
+      if (map != null && term >= 0) {
         term = (int) map.getGlobalOrd(subIndex, term);
       }
       int arrIdx = term-startTermIndex;

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/NumericFacets.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/NumericFacets.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/NumericFacets.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/NumericFacets.java Mon Oct 21 18:58:24 2013
@@ -190,8 +190,9 @@ final class NumericFacets {
         }
         docsWithField = FieldCache.DEFAULT.getDocsWithField(ctx.reader(), fieldName);
       }
-      if (docsWithField.get(doc - ctx.docBase)) {
-        hashTable.add(doc, longs.get(doc - ctx.docBase), 1);
+      long v = longs.get(doc - ctx.docBase);
+      if (v != 0 || docsWithField.get(doc - ctx.docBase)) {
+        hashTable.add(doc, v, 1);
       } else {
         ++missingCount;
       }
@@ -254,7 +255,7 @@ final class NumericFacets {
 
       if (zeros && (limit < 0 || result.size() < limit)) { // need to merge with the term dict
         if (!sf.indexed()) {
-          throw new IllegalStateException("Cannot use " + FacetParams.FACET_MINCOUNT + "=0 on a field which is not indexed");
+          throw new IllegalStateException("Cannot use " + FacetParams.FACET_MINCOUNT + "=0 on field " + sf.getName() + " which is not indexed");
         }
         // Add zeros until there are limit results
         final Set<String> alreadySeen = new HashSet<String>();

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java Mon Oct 21 18:58:24 2013
@@ -174,10 +174,10 @@ class PerSegmentSingleValuedFaceting {
     while (queue.size() > 0) {
       SegFacet seg = queue.top();
 
-      // make a shallow copy
-      val.bytes = seg.tempBR.bytes;
-      val.offset = seg.tempBR.offset;
-      val.length = seg.tempBR.length;
+      // we will normally end up advancing the term enum for this segment
+      // while still using "val", so we need to make a copy since the BytesRef
+      // may be shared across calls.
+      val.copyBytes(seg.tempBR);
 
       int count = 0;
 

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/SimpleFacets.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/SimpleFacets.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/SimpleFacets.java Mon Oct 21 18:58:24 2013
@@ -17,20 +17,6 @@
 
 package org.apache.solr.request;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.IdentityHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Executor;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
@@ -94,6 +80,26 @@ import org.apache.solr.util.DateMathPars
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.apache.solr.util.LongPriorityQueue;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
 /**
  * A class that generates simple Facet information for a request.
  *
@@ -334,6 +340,10 @@ public class SimpleFacets {
   }
 
   public NamedList<Integer> getTermCounts(String field) throws IOException {
+    return getTermCounts(field, this.docs);
+  }
+
+  public NamedList<Integer> getTermCounts(String field, DocSet base) throws IOException {
     int offset = params.getFieldInt(field, FacetParams.FACET_OFFSET, 0);
     int limit = params.getFieldInt(field, FacetParams.FACET_LIMIT, 100);
     if (limit == 0) return new NamedList<Integer>();
@@ -405,13 +415,13 @@ public class SimpleFacets {
     }
 
     if (params.getFieldBool(field, GroupParams.GROUP_FACET, false)) {
-      counts = getGroupedCounts(searcher, docs, field, multiToken, offset,limit, mincount, missing, sort, prefix);
+      counts = getGroupedCounts(searcher, base, field, multiToken, offset,limit, mincount, missing, sort, prefix);
     } else {
       assert method != null;
       switch (method) {
         case ENUM:
           assert TrieField.getMainValuePrefix(ft) == null;
-          counts = getFacetTermEnumCounts(searcher, docs, field, offset, limit, mincount,missing,sort,prefix);
+          counts = getFacetTermEnumCounts(searcher, base, field, offset, limit, mincount,missing,sort,prefix);
           break;
         case FCS:
           assert !multiToken;
@@ -420,9 +430,9 @@ public class SimpleFacets {
             if (prefix != null && !prefix.isEmpty()) {
               throw new SolrException(ErrorCode.BAD_REQUEST, FacetParams.FACET_PREFIX + " is not supported on numeric types");
             }
-            counts = NumericFacets.getCounts(searcher, docs, field, offset, limit, mincount, missing, sort);
+            counts = NumericFacets.getCounts(searcher, base, field, offset, limit, mincount, missing, sort);
           } else {
-            PerSegmentSingleValuedFaceting ps = new PerSegmentSingleValuedFaceting(searcher, docs, field, offset,limit, mincount, missing, sort, prefix);
+            PerSegmentSingleValuedFaceting ps = new PerSegmentSingleValuedFaceting(searcher, base, field, offset,limit, mincount, missing, sort, prefix);
             Executor executor = threads == 0 ? directExecutor : facetExecutor;
             ps.setNumThreads(threads);
             counts = ps.getFacetCounts(executor);
@@ -430,12 +440,12 @@ public class SimpleFacets {
           break;
         case FC:
           if (sf.hasDocValues()) {
-            counts = DocValuesFacets.getCounts(searcher, docs, field, offset,limit, mincount, missing, sort, prefix);
+            counts = DocValuesFacets.getCounts(searcher, base, field, offset,limit, mincount, missing, sort, prefix);
           } else if (multiToken || TrieField.getMainValuePrefix(ft) != null) {
             UnInvertedField uif = UnInvertedField.getUnInvertedField(field, searcher);
-            counts = uif.getCounts(searcher, docs, offset, limit, mincount,missing,sort,prefix);
+            counts = uif.getCounts(searcher, base, offset, limit, mincount,missing,sort,prefix);
           } else {
-            counts = getFieldCacheCounts(searcher, docs, field, offset,limit, mincount, missing, sort, prefix);
+            counts = getFieldCacheCounts(searcher, base, field, offset,limit, mincount, missing, sort, prefix);
           }
           break;
         default:
@@ -515,33 +525,92 @@ public class SimpleFacets {
    * @see #getFieldMissingCount
    * @see #getFacetTermEnumCounts
    */
+  @SuppressWarnings("unchecked")
   public NamedList<Object> getFacetFieldCounts()
-          throws IOException, SyntaxError {
+      throws IOException, SyntaxError {
 
     NamedList<Object> res = new SimpleOrderedMap<Object>();
     String[] facetFs = params.getParams(FacetParams.FACET_FIELD);
-    if (null != facetFs) {
+    if (null == facetFs) {
+      return res;
+    }
+
+    // Passing a negative number for FACET_THREADS implies an unlimited number of threads is acceptable.
+    // Also, a subtlety of directExecutor is that no matter how many times you "submit" a job, it's really
+    // just a method call in that it's run by the calling thread.
+    int maxThreads = req.getParams().getInt(FacetParams.FACET_THREADS, 0);
+    Executor executor = maxThreads == 0 ? directExecutor : facetExecutor;
+    final Semaphore semaphore = new Semaphore((maxThreads <= 0) ? Integer.MAX_VALUE : maxThreads);
+    List<Future<NamedList>> futures = new ArrayList<Future<NamedList>>(facetFs.length);
+
+    try {
+      //Loop over fields; submit to executor, keeping the future
       for (String f : facetFs) {
         parseParams(FacetParams.FACET_FIELD, f);
-        String termList = localParams == null ? null : localParams.get(CommonParams.TERMS);
-        if (termList != null) {
-          res.add(key, getListedTermCounts(facetValue, termList));
-        } else {
-          res.add(key, getTermCounts(facetValue));
-        }
+        final String termList = localParams == null ? null : localParams.get(CommonParams.TERMS);
+        final String workerKey = key;
+        final String workerFacetValue = facetValue;
+        final DocSet workerBase = this.docs;
+        Callable<NamedList> callable = new Callable<NamedList>() {
+          @Override
+          public NamedList call() throws Exception {
+            try {
+              NamedList<Object> result = new SimpleOrderedMap<Object>();
+              if(termList != null) {
+                result.add(workerKey, getListedTermCounts(workerFacetValue, termList, workerBase));
+              } else {
+                result.add(workerKey, getTermCounts(workerFacetValue, workerBase));
+              }
+              return result;
+            } catch (SolrException se) {
+              throw se;
+            } catch (Exception e) {
+              throw new SolrException(ErrorCode.SERVER_ERROR,
+                                      "Exception during facet.field: " + workerFacetValue, e.getCause());
+            } finally {
+              semaphore.release();
+            }
+          }
+        };
+
+        RunnableFuture<NamedList> runnableFuture = new FutureTask<NamedList>(callable);
+        semaphore.acquire();//may block and/or interrupt
+        executor.execute(runnableFuture);//releases semaphore when done
+        futures.add(runnableFuture);
+      }//facetFs loop
+
+      //Loop over futures to get the values. The order is the same as facetFs but shouldn't matter.
+      for (Future<NamedList> future : futures) {
+        res.addAll(future.get());
+      }
+      assert semaphore.availablePermits() >= maxThreads;
+    } catch (InterruptedException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "Error while processing facet fields: InterruptedException", e);
+    } catch (ExecutionException ee) {
+      Throwable e = ee.getCause();//unwrap
+      if (e instanceof RuntimeException) {
+        throw (RuntimeException) e;
       }
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "Error while processing facet fields: " + e.toString(), e);
     }
+
     return res;
   }
 
 
   private NamedList<Integer> getListedTermCounts(String field, String termList) throws IOException {
+    return getListedTermCounts(field, termList, this.docs);
+  }
+
+  private NamedList getListedTermCounts(String field, String termList, DocSet base) throws IOException {
     FieldType ft = searcher.getSchema().getFieldType(field);
     List<String> terms = StrUtils.splitSmart(termList, ",", true);
     NamedList<Integer> res = new NamedList<Integer>();
     for (String term : terms) {
       String internal = ft.toInternal(term);
-      int count = searcher.numDocs(new TermQuery(new Term(field, internal)), docs);
+      int count = searcher.numDocs(new TermQuery(new Term(field, internal)), base);
       res.add(term, count);
     }
     return res;    
@@ -558,7 +627,7 @@ public class SimpleFacets {
     throws IOException {
     SchemaField sf = searcher.getSchema().getField(fieldName);
     DocSet hasVal = searcher.getDocSet
-      (sf.getType().getRangeQuery(null, sf, null, null, false, false));
+        (sf.getType().getRangeQuery(null, sf, null, null, false, false));
     return docs.andNotSize(hasVal);
   }
 
@@ -1135,7 +1204,7 @@ public class SimpleFacets {
   }
 
   private <T extends Comparable<T>> NamedList getFacetRangeCounts
-    (final SchemaField sf, 
+    (final SchemaField sf,
      final RangeEndpointCalculator<T> calc) throws IOException {
     
     final String f = sf.getName();
@@ -1268,7 +1337,7 @@ public class SimpleFacets {
    */
   protected int rangeCount(SchemaField sf, String low, String high,
                            boolean iLow, boolean iHigh) throws IOException {
-    Query rangeQ = sf.getType().getRangeQuery(null, sf,low,high,iLow,iHigh);
+    Query rangeQ = sf.getType().getRangeQuery(null, sf, low, high, iLow, iHigh);
     if (params.getBool(GroupParams.GROUP_FACET, false)) {
       return getGroupedFacetQueryCount(rangeQ);
     } else {
@@ -1282,8 +1351,8 @@ public class SimpleFacets {
   @Deprecated
   protected int rangeCount(SchemaField sf, Date low, Date high,
                            boolean iLow, boolean iHigh) throws IOException {
-    Query rangeQ = ((DateField)(sf.getType())).getRangeQuery(null, sf,low,high,iLow,iHigh);
-    return searcher.numDocs(rangeQ , docs);
+    Query rangeQ = ((DateField)(sf.getType())).getRangeQuery(null, sf, low, high, iLow, iHigh);
+    return searcher.numDocs(rangeQ, docs);
   }
   
   /**

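The facet.field threading added above bounds concurrency with a semaphore that is acquired before each submit and released inside each task. Here is a minimal, self-contained sketch of that submission pattern using only java.util.concurrent; it is not part of the commit, and the job bodies and thread counts are illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.RunnableFuture;
import java.util.concurrent.Semaphore;

public class BoundedSubmitSketch {
  public static void main(String[] args) throws Exception {
    int maxThreads = 2;
    ExecutorService executor = Executors.newCachedThreadPool();
    // The semaphore caps how many submitted tasks may be in flight at once.
    final Semaphore semaphore = new Semaphore(maxThreads);
    List<Future<String>> futures = new ArrayList<Future<String>>();

    for (int i = 0; i < 8; i++) {
      final int jobId = i;
      Callable<String> callable = new Callable<String>() {
        @Override
        public String call() throws Exception {
          try {
            return "job-" + jobId;      // stand-in for one facet.field computation
          } finally {
            semaphore.release();        // free a slot when the task finishes
          }
        }
      };
      semaphore.acquire();              // may block until a slot is free
      RunnableFuture<String> task = new FutureTask<String>(callable);
      executor.execute(task);
      futures.add(task);
    }

    for (Future<String> f : futures) {
      System.out.println(f.get());      // collect results in submission order
    }
    executor.shutdown();
  }
}
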
Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/UnInvertedField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/UnInvertedField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/UnInvertedField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/request/UnInvertedField.java Mon Oct 21 18:58:24 2013
@@ -18,7 +18,6 @@
 package org.apache.solr.request;
 
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
@@ -102,6 +101,15 @@ public class UnInvertedField extends Doc
 
   private SolrIndexSearcher.DocsEnumState deState;
   private final SolrIndexSearcher searcher;
+  private final boolean isPlaceholder;
+
+  private static UnInvertedField uifPlaceholder = new UnInvertedField();
+
+  private UnInvertedField() { // Dummy for synchronization.
+    super("fake", 0, 0); // cheapest initialization I can find.
+    isPlaceholder = true;
+    searcher = null;
+   }
 
   @Override
   protected void visitTerm(TermsEnum te, int termNum) throws IOException {
@@ -172,6 +180,7 @@ public class UnInvertedField extends Doc
           DEFAULT_INDEX_INTERVAL_BITS);
     //System.out.println("maxTermDocFreq=" + maxTermDocFreq + " maxDoc=" + searcher.maxDoc());
 
+    isPlaceholder = false;
     final String prefix = TrieField.getMainValuePrefix(searcher.getSchema().getFieldType(field));
     this.searcher = searcher;
     try {
@@ -650,22 +659,44 @@ public class UnInvertedField extends Doc
   //////////////////////////////////////////////////////////////////
   //////////////////////////// caching /////////////////////////////
   //////////////////////////////////////////////////////////////////
+
   public static UnInvertedField getUnInvertedField(String field, SolrIndexSearcher searcher) throws IOException {
     SolrCache<String,UnInvertedField> cache = searcher.getFieldValueCache();
     if (cache == null) {
       return new UnInvertedField(field, searcher);
     }
-
-    UnInvertedField uif = cache.get(field);
-    if (uif == null) {
-      synchronized (cache) {
-        uif = cache.get(field);
-        if (uif == null) {
-          uif = new UnInvertedField(field, searcher);
-          cache.put(field, uif);
+    UnInvertedField uif = null;
+    Boolean doWait = false;
+    synchronized (cache) {
+      uif = cache.get(field);
+      if (uif == null) {
+        cache.put(field, uifPlaceholder); // This thread will load this field, don't let other threads try.
+      } else {
+        if (uif.isPlaceholder == false) {
+          return uif;
         }
+        doWait = true; // Someone else has put the place holder in, wait for that to complete.
       }
     }
+    while (doWait) {
+      try {
+        synchronized (cache) {
+          uif = cache.get(field); // Should at least return the placeholder, NPE if not is OK.
+          if (uif.isPlaceholder == false) { // OK, another thread put this in the cache we should be good.
+            return uif;
+          }
+          cache.wait();
+        }
+      } catch (InterruptedException e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Thread interrupted in getUninvertedField.");
+      }
+    }
+
+    uif = new UnInvertedField(field, searcher);
+    synchronized (cache) {
+      cache.put(field, uif); // Note, this cleverly replaces the placeholder.
+      cache.notifyAll();
+    }
 
     return uif;
   }

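The UnInvertedField change above avoids uninverting the same field twice by publishing a placeholder into the cache and having other threads wait() until the real value replaces it. A condensed, generic sketch of that load-once pattern over a plain map follows; it is not part of the commit, and the map, key and loader are illustrative (error handling is omitted).

import java.util.HashMap;
import java.util.Map;

public class LoadOnceCacheSketch {
  private static final Object PLACEHOLDER = new Object();
  private final Map<String, Object> cache = new HashMap<String, Object>();

  Object get(String key) throws InterruptedException {
    synchronized (cache) {
      Object v = cache.get(key);
      if (v == null) {
        cache.put(key, PLACEHOLDER);   // claim the slot; this thread will load it
      } else {
        while (v == PLACEHOLDER) {     // someone else is loading; wait for notifyAll()
          cache.wait();
          v = cache.get(key);
        }
        return v;
      }
    }
    Object loaded = expensiveLoad(key); // done outside the lock, exactly once per key
    synchronized (cache) {
      cache.put(key, loaded);           // replace the placeholder
      cache.notifyAll();                // wake any waiters
    }
    return loaded;
  }

  private Object expensiveLoad(String key) {
    return "value-for-" + key;          // stand-in for building an UnInvertedField
  }

  public static void main(String[] args) throws InterruptedException {
    System.out.println(new LoadOnceCacheSketch().get("unit_id"));
  }
}
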
Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java Mon Oct 21 18:58:24 2013
@@ -19,11 +19,10 @@ package org.apache.solr.response;
 import java.io.*;
 import java.util.*;
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.util.BytesRef;
+import org.apache.solr.client.solrj.impl.BinaryResponseParser;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.util.JavaBinCodec;
@@ -59,7 +58,7 @@ public class BinaryResponseWriter implem
 
   @Override
   public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
-    return "application/octet-stream";
+    return BinaryResponseParser.BINARY_CONTENT_TYPE;
   }
 
   @Override

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/SchemaXmlResponseWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/SchemaXmlResponseWriter.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/SchemaXmlResponseWriter.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/SchemaXmlResponseWriter.java Mon Oct 21 18:58:24 2013
@@ -20,6 +20,7 @@ package org.apache.solr.response;
 import java.io.Writer;
 import java.io.IOException;
 
+import org.apache.solr.client.solrj.impl.XMLResponseParser;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.request.SolrQueryRequest;
 
@@ -44,6 +45,6 @@ public class SchemaXmlResponseWriter imp
 
   @Override
   public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
-    return CONTENT_TYPE_XML_UTF8;
+    return XMLResponseParser.XML_CONTENT_TYPE;
   }
 }

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/XMLResponseWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/XMLResponseWriter.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/XMLResponseWriter.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/response/XMLResponseWriter.java Mon Oct 21 18:58:24 2013
@@ -20,6 +20,7 @@ package org.apache.solr.response;
 import java.io.Writer;
 import java.io.IOException;
 
+import org.apache.solr.client.solrj.impl.XMLResponseParser;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.request.SolrQueryRequest;
 
@@ -44,6 +45,6 @@ public class XMLResponseWriter implement
 
   @Override
   public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
-    return CONTENT_TYPE_XML_UTF8;
+    return XMLResponseParser.XML_CONTENT_TYPE;
   }
 }

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/BoolField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/BoolField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/BoolField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/BoolField.java Mon Oct 21 18:58:24 2013
@@ -74,6 +74,7 @@ public class BoolField extends Primitive
 
         @Override
         public void reset() throws IOException {
+          super.reset();
           done = false;
         }
 
@@ -173,7 +174,8 @@ class BoolFieldSource extends ValueSourc
     // figure out what ord maps to true
     int nord = sindex.getValueCount();
     BytesRef br = new BytesRef();
-    int tord = -1;
+    // if no values in the segment, default trueOrd to something other than -1 (missing)
+    int tord = -2;
     for (int i=0; i<nord; i++) {
       sindex.lookupOrd(i, br);
       if (br.length==1 && br.bytes[br.offset]=='T') {

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/CollationField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/CollationField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/CollationField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/CollationField.java Mon Oct 21 18:58:24 2013
@@ -209,37 +209,23 @@ public class CollationField extends Fiel
    * its just that all methods are synced), this keeps things 
    * simple (we already have a threadlocal clone in the reused TS)
    */
-  private BytesRef analyzeRangePart(String field, String part) {
-    TokenStream source;
-      
-    try {
-      source = analyzer.tokenStream(field, part);
-      source.reset();
-    } catch (IOException e) {
-      throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e);
-    }
-      
-    TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
-    BytesRef bytes = termAtt.getBytesRef();
+  private BytesRef analyzeRangePart(String field, String part) {     
+    try (TokenStream source = analyzer.tokenStream(field, part)) {
+      source.reset();    
+      TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
+      BytesRef bytes = termAtt.getBytesRef();
 
-    // we control the analyzer here: most errors are impossible
-    try {
+      // we control the analyzer here: most errors are impossible
       if (!source.incrementToken())
         throw new IllegalArgumentException("analyzer returned no terms for range part: " + part);
       termAtt.fillBytesRef();
       assert !source.incrementToken();
-    } catch (IOException e) {
-      throw new RuntimeException("error analyzing range part: " + part, e);
-    }
       
-    try {
       source.end();
-      source.close();
+      return BytesRef.deepCopyOf(bytes);
     } catch (IOException e) {
-      throw new RuntimeException("Unable to end & close TokenStream after analyzing range part: " + part, e);
+      throw new RuntimeException("Unable to analyze range part: " + part, e);
     }
-      
-    return BytesRef.deepCopyOf(bytes);
   }
   
   @Override

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/CurrencyField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/CurrencyField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/CurrencyField.java Mon Oct 21 18:58:24 2013
@@ -146,7 +146,7 @@ public class CurrencyField extends Field
       provider = c.newInstance();
       provider.init(args);
     } catch (Exception e) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Error instantiating exhange rate provider "+exchangeRateProviderClass+": " + e.getMessage(), e);
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Error instantiating exchange rate provider "+exchangeRateProviderClass+": " + e.getMessage(), e);
     }
   }
 

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/IndexSchema.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/IndexSchema.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/IndexSchema.java Mon Oct 21 18:58:24 2013
@@ -51,6 +51,7 @@ import javax.xml.xpath.XPath;
 import javax.xml.xpath.XPathConstants;
 import javax.xml.xpath.XPathExpressionException;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.Writer;
 import java.util.ArrayList;
@@ -106,6 +107,7 @@ public class IndexSchema {
   private static final String AT = "@";
   private static final String DESTINATION_DYNAMIC_BASE = "destDynamicBase";
   private static final String MAX_CHARS = "maxChars";
+  private static final String SOLR_CORE_NAME = "solr.core.name";
   private static final String SOURCE_DYNAMIC_BASE = "sourceDynamicBase";
   private static final String SOURCE_EXPLICIT_FIELDS = "sourceExplicitFields";
   private static final String TEXT_FUNCTION = "text()";
@@ -379,6 +381,7 @@ public class IndexSchema {
     protected final HashMap<String, Analyzer> analyzers;
 
     SolrIndexAnalyzer() {
+      super(PER_FIELD_REUSE_STRATEGY);
       analyzers = analyzerCache();
     }
 
@@ -400,6 +403,8 @@ public class IndexSchema {
   }
 
   private class SolrQueryAnalyzer extends SolrIndexAnalyzer {
+    SolrQueryAnalyzer() {}
+
     @Override
     protected HashMap<String, Analyzer> analyzerCache() {
       HashMap<String, Analyzer> cache = new HashMap<String, Analyzer>();
@@ -432,7 +437,7 @@ public class IndexSchema {
       // Another case where the initialization from the test harness is different than the "real world"
       sb.append("[");
       if (loader.getCoreProperties() != null) {
-        sb.append(loader.getCoreProperties().getProperty(NAME));
+        sb.append(loader.getCoreProperties().getProperty(SOLR_CORE_NAME));
       } else {
         sb.append("null");
       }
@@ -602,10 +607,13 @@ public class IndexSchema {
         aware.inform(this);
       }
     } catch (SolrException e) {
-      throw e;
+      throw new SolrException(ErrorCode.getErrorCode(e.code()), e.getMessage() + ". Schema file is " +
+          loader.getInstanceDir() + resourceName, e);
     } catch(Exception e) {
       // unexpected exception...
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Schema Parsing Failed: " + e.getMessage(), e);
+      throw new SolrException(ErrorCode.SERVER_ERROR,
+          "Schema Parsing Failed: " + e.getMessage() + ". Schema file is " + loader.getInstanceDir() + resourceName,
+          e);
     }
 
     // create the field analyzers
@@ -667,6 +675,14 @@ public class IndexSchema {
           requiredFields.add(f);
         }
       } else if (node.getNodeName().equals(DYNAMIC_FIELD)) {
+        if( f.getDefaultValue() != null ) {
+          throw new SolrException(ErrorCode.SERVER_ERROR,
+                                  DYNAMIC_FIELD + " can not have a default value: " + name);
+        }
+        if ( f.isRequired() ) {
+          throw new SolrException(ErrorCode.SERVER_ERROR,
+                                  DYNAMIC_FIELD + " can not be required: " + name);
+        }
         if (isValidFieldGlob(name)) {
           // make sure nothing else has the same path
           addDynamicField(dFields, f);
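
For context on the PER_FIELD_REUSE_STRATEGY change above: a minimal, hypothetical sketch (assuming the Lucene 4.x Analyzer API; the class name is invented) of an analyzer that opts into per-field reuse, so its cached TokenStreamComponents are keyed by field name rather than shared across all fields on a thread:

    import java.io.Reader;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.core.KeywordTokenizer;

    final class PerFieldDemoAnalyzer extends Analyzer {
      PerFieldDemoAnalyzer() {
        super(PER_FIELD_REUSE_STRATEGY); // same strategy SolrIndexAnalyzer now passes to its super constructor
      }
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // a real per-field analyzer (like Solr's) would pick different components based on fieldName
        return new TokenStreamComponents(new KeywordTokenizer(reader));
      }
    }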

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/OpenExchangeRatesOrgProvider.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/OpenExchangeRatesOrgProvider.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/OpenExchangeRatesOrgProvider.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/OpenExchangeRatesOrgProvider.java Mon Oct 21 18:58:24 2013
@@ -59,7 +59,8 @@ public class OpenExchangeRatesOrgProvide
   protected static final String DEFAULT_REFRESH_INTERVAL    = "1440";
   
   protected String ratesFileLocation;
-  protected int refreshInterval;
+  // configured in minutes, but stored in seconds for quicker math
+  protected int refreshIntervalSeconds;
   protected ResourceLoader resourceLoader;
   
   protected OpenExchangeRates rates;
@@ -84,7 +85,7 @@ public class OpenExchangeRatesOrgProvide
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot get exchange rate; currency was null.");
     }
     
-    if (rates.getTimestamp() + refreshInterval*60*1000 > System.currentTimeMillis()) {
+    if ((rates.getTimestamp() + refreshIntervalSeconds)*1000 < System.currentTimeMillis()) {
       log.debug("Refresh interval has expired. Refreshing exchange rates.");
       reload();
     }
@@ -159,13 +160,14 @@ public class OpenExchangeRatesOrgProvide
       if (null == ratesFileLocation) {
         throw new SolrException(ErrorCode.SERVER_ERROR, "Init param must be specified: " + PARAM_RATES_FILE_LOCATION);
       }
-      refreshInterval = Integer.parseInt(getParam(params.get(PARAM_REFRESH_INTERVAL), DEFAULT_REFRESH_INTERVAL));
+      int refreshInterval = Integer.parseInt(getParam(params.get(PARAM_REFRESH_INTERVAL), DEFAULT_REFRESH_INTERVAL));
       // Force a refresh interval of minimum one hour, since the API does not offer better resolution
       if (refreshInterval < 60) {
         refreshInterval = 60;
         log.warn("Specified refreshInterval was too small. Setting to 60 minutes which is the update rate of openexchangerates.org");
       }
       log.info("Initialized with rates="+ratesFileLocation+", refreshInterval="+refreshInterval+".");
+      refreshIntervalSeconds = refreshInterval * 60;
     } catch (SolrException e1) {
       throw e1;
     } catch (Exception e2) {
@@ -191,7 +193,7 @@ public class OpenExchangeRatesOrgProvide
   /**
    * A simple class encapsulating the JSON data from openexchangerates.org
    */
-  class OpenExchangeRates {
+  static class OpenExchangeRates {
     private Map<String, Double> rates;
     private String baseCurrency;
     private long timestamp;
@@ -261,6 +263,12 @@ public class OpenExchangeRatesOrgProvide
     public long getTimestamp() {
       return timestamp;
     }
+    /** Package protected method for test purposes
+     * @lucene.internal
+     */
+    void setTimestamp(long timestamp) {
+      this.timestamp = timestamp;
+    }
 
     public String getDisclaimer() {
       return disclaimer;
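
A self-contained toy of the refresh arithmetic introduced above (the class and values are invented for illustration): the interval is configured in minutes, converted once to seconds at init time, and compared against the feed's epoch-seconds timestamp after scaling to milliseconds:

    public class RefreshCheckDemo {
      public static void main(String[] args) {
        long ratesTimestampSeconds = 1382378400L;   // hypothetical epoch-seconds value from the JSON feed
        int refreshIntervalSeconds = 60 * 60;       // "60" minutes from config, converted once
        boolean expired =
            (ratesTimestampSeconds + refreshIntervalSeconds) * 1000 < System.currentTimeMillis();
        System.out.println(expired ? "refresh interval has expired; reload rates" : "rates are still fresh");
      }
    }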

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java Mon Oct 21 18:58:24 2013
@@ -252,9 +252,11 @@ public class PreAnalyzedField extends Fi
     private byte[] binaryValue = null;
     private PreAnalyzedParser parser;
     private Reader lastReader;
+    private Reader input; // hides original input since we replay saved states (and don't reuse)
     
     public PreAnalyzedTokenizer(Reader reader, PreAnalyzedParser parser) {
       super(reader);
+      this.input = reader;
       this.parser = parser;
     }
     

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/SchemaField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/SchemaField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/SchemaField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/SchemaField.java Mon Oct 21 18:58:24 2013
@@ -239,13 +239,21 @@ public final class SchemaField extends F
 
     if (on(falseProps,INDEXED)) {
       int pp = (INDEXED 
-              | STORE_TERMVECTORS | STORE_TERMPOSITIONS | STORE_TERMOFFSETS
-              | SORT_MISSING_FIRST | SORT_MISSING_LAST);
+              | STORE_TERMVECTORS | STORE_TERMPOSITIONS | STORE_TERMOFFSETS);
       if (on(pp,trueProps)) {
         throw new RuntimeException("SchemaField: " + name + " conflicting 'true' field options for non-indexed field:" + props);
       }
       p &= ~pp;
     }
+    
+    if (on(falseProps,INDEXED) && on(falseProps,DOC_VALUES)) {
+      int pp = (SORT_MISSING_FIRST | SORT_MISSING_LAST);
+      if (on(pp,trueProps)) {
+        throw new RuntimeException("SchemaField: " + name + " conflicting 'true' field options for non-indexed/non-docValues field:" + props);
+      }
+      p &= ~pp;
+    }
+    
     if (on(falseProps,INDEXED)) {
       int pp = (OMIT_NORMS | OMIT_TF_POSITIONS | OMIT_POSITIONS);
       if (on(pp,falseProps)) {
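
A toy illustration of the relaxed check above (the mask values and class are invented, not Solr's): sortMissingFirst/sortMissingLast is now only a conflict when the field is neither indexed nor backed by docValues:

    public class FieldPropsDemo {
      static final int INDEXED = 0x1, DOC_VALUES = 0x2,
                       SORT_MISSING_FIRST = 0x4, SORT_MISSING_LAST = 0x8;

      static boolean on(int bits, int props) { return (bits & props) != 0; }

      public static void main(String[] args) {
        // e.g. indexed="false" docValues="false" sortMissingLast="true"
        int falseProps = INDEXED | DOC_VALUES;
        int trueProps  = SORT_MISSING_LAST;
        int pp = SORT_MISSING_FIRST | SORT_MISSING_LAST;
        boolean conflict = on(falseProps, INDEXED) && on(falseProps, DOC_VALUES) && on(pp, trueProps);
        System.out.println(conflict); // true: such a field would now be rejected at schema load
      }
    }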

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/StrField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/StrField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/StrField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/StrField.java Mon Oct 21 18:58:24 2013
@@ -80,9 +80,6 @@ public class StrField extends PrimitiveF
 
   @Override
   public void checkSchemaField(SchemaField field) {
-    if (field.hasDocValues() && !field.multiValued() && !(field.isRequired() || field.getDefaultValue() != null)) {
-      throw new IllegalStateException("Field " + this + " has single-valued doc values enabled, but has no default value and is not required");
-    }
   }
 }
 

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/TextField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/TextField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/TextField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/TextField.java Mon Oct 21 18:58:24 2013
@@ -20,20 +20,15 @@ package org.apache.solr.schema;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.search.*;
 import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.CachingTokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.QueryBuilder;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
 
 import java.util.Map;
-import java.util.List;
-import java.util.ArrayList;
 import java.io.IOException;
 
 /** <code>TextField</code> is the basic type for configurable text analysis.
@@ -138,195 +133,29 @@ public class TextField extends FieldType
   public static BytesRef analyzeMultiTerm(String field, String part, Analyzer analyzerIn) {
     if (part == null || analyzerIn == null) return null;
 
-    TokenStream source;
-    try {
-      source = analyzerIn.tokenStream(field, part);
+    try (TokenStream source = analyzerIn.tokenStream(field, part)){
       source.reset();
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to initialize TokenStream to analyze multiTerm term: " + part, e);
-    }
 
-    TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
-    BytesRef bytes = termAtt.getBytesRef();
+      TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
+      BytesRef bytes = termAtt.getBytesRef();
 
-    try {
       if (!source.incrementToken())
         throw  new SolrException(SolrException.ErrorCode.BAD_REQUEST,"analyzer returned no terms for multiTerm term: " + part);
       termAtt.fillBytesRef();
       if (source.incrementToken())
         throw  new SolrException(SolrException.ErrorCode.BAD_REQUEST,"analyzer returned too many terms for multiTerm term: " + part);
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"error analyzing range part: " + part, e);
-    }
 
-    try {
       source.end();
-      source.close();
+      return BytesRef.deepCopyOf(bytes);
     } catch (IOException e) {
-      throw new RuntimeException("Unable to end & close TokenStream after analyzing multiTerm term: " + part, e);
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"error analyzing range part: " + part, e);
     }
-
-    return BytesRef.deepCopyOf(bytes);
   }
 
 
   static Query parseFieldQuery(QParser parser, Analyzer analyzer, String field, String queryText) {
-    int phraseSlop = 0;
-
-    // most of the following code is taken from the Lucene QueryParser
-
-    // Use the analyzer to get all the tokens, and then build a TermQuery,
-    // PhraseQuery, or nothing based on the term count
-
-    TokenStream source;
-    try {
-      source = analyzer.tokenStream(field, queryText);
-      source.reset();
-    } catch (IOException e) {
-      throw new RuntimeException("Unable to initialize TokenStream to analyze query text", e);
-    }
-    CachingTokenFilter buffer = new CachingTokenFilter(source);
-    CharTermAttribute termAtt = null;
-    PositionIncrementAttribute posIncrAtt = null;
-    int numTokens = 0;
-
-    buffer.reset();
-
-    if (buffer.hasAttribute(CharTermAttribute.class)) {
-      termAtt = buffer.getAttribute(CharTermAttribute.class);
-    }
-    if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
-      posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
-    }
-
-    int positionCount = 0;
-    boolean severalTokensAtSamePosition = false;
-
-    boolean hasMoreTokens = false;
-    if (termAtt != null) {
-      try {
-        hasMoreTokens = buffer.incrementToken();
-        while (hasMoreTokens) {
-          numTokens++;
-          int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
-          if (positionIncrement != 0) {
-            positionCount += positionIncrement;
-          } else {
-            severalTokensAtSamePosition = true;
-          }
-          hasMoreTokens = buffer.incrementToken();
-        }
-      } catch (IOException e) {
-        // ignore
-      }
-    }
-    try {
-      // rewind the buffer stream
-      buffer.reset();
-
-      // close original stream - all tokens buffered
-      source.close();
-    }
-    catch (IOException e) {
-      // ignore
-    }
-
-    if (numTokens == 0)
-      return null;
-    else if (numTokens == 1) {
-      String term = null;
-      try {
-        boolean hasNext = buffer.incrementToken();
-        assert hasNext == true;
-        term = termAtt.toString();
-      } catch (IOException e) {
-        // safe to ignore, because we know the number of tokens
-      }
-      // return newTermQuery(new Term(field, term));
-      return new TermQuery(new Term(field, term));
-    } else {
-      if (severalTokensAtSamePosition) {
-        if (positionCount == 1) {
-          // no phrase query:
-          // BooleanQuery q = newBooleanQuery(true);
-          BooleanQuery q = new BooleanQuery(true);
-          for (int i = 0; i < numTokens; i++) {
-            String term = null;
-            try {
-              boolean hasNext = buffer.incrementToken();
-              assert hasNext == true;
-              term = termAtt.toString();
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
-            }
-
-            // Query currentQuery = newTermQuery(new Term(field, term));
-            Query currentQuery = new TermQuery(new Term(field, term));
-            q.add(currentQuery, BooleanClause.Occur.SHOULD);
-          }
-          return q;
-        }
-        else {
-          // phrase query:
-          // MultiPhraseQuery mpq = newMultiPhraseQuery();
-          MultiPhraseQuery mpq = new MultiPhraseQuery();
-          mpq.setSlop(phraseSlop);
-          List multiTerms = new ArrayList();
-          int position = -1;
-          for (int i = 0; i < numTokens; i++) {
-            String term = null;
-            int positionIncrement = 1;
-            try {
-              boolean hasNext = buffer.incrementToken();
-              assert hasNext == true;
-              term = termAtt.toString();
-              if (posIncrAtt != null) {
-                positionIncrement = posIncrAtt.getPositionIncrement();
-              }
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
-            }
-
-            if (positionIncrement > 0 && multiTerms.size() > 0) {
-              mpq.add((Term[])multiTerms.toArray(new Term[multiTerms.size()]),position);
-              multiTerms.clear();
-            }
-            position += positionIncrement;
-            multiTerms.add(new Term(field, term));
-          }
-          mpq.add((Term[])multiTerms.toArray(new Term[multiTerms.size()]),position);
-          return mpq;
-        }
-      }
-      else {
-        // PhraseQuery pq = newPhraseQuery();
-        PhraseQuery pq = new PhraseQuery();
-        pq.setSlop(phraseSlop);
-        int position = -1;
-
-
-        for (int i = 0; i < numTokens; i++) {
-          String term = null;
-          int positionIncrement = 1;
-
-          try {
-            boolean hasNext = buffer.incrementToken();
-            assert hasNext == true;
-            term = termAtt.toString();
-            if (posIncrAtt != null) {
-              positionIncrement = posIncrAtt.getPositionIncrement();
-            }
-          } catch (IOException e) {
-            // safe to ignore, because we know the number of tokens
-          }
-
-          position += positionIncrement;
-          pq.add(new Term(field, term),position);
-        }
-        return pq;
-      }
-    }
-
+    // note: this method has always behaved this way (though nothing appears to call it), because it has no notion of quotes...
+    return new QueryBuilder(analyzer).createPhraseQuery(field, queryText);
   }
 
   public void setIsExplicitMultiTermAnalyzer(boolean isExplicitMultiTermAnalyzer) {
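
The analyzeMultiTerm() rewrite above relies on TokenStream being Closeable; a minimal sketch (assuming the Lucene 4.x analysis API; the analyzer and Version constant are arbitrary choices for illustration) of the same try-with-resources pattern, where close() runs even if analysis throws:

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class TokenStreamDemo {
      public static void main(String[] args) throws IOException {
        Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_45);
        try (TokenStream ts = analyzer.tokenStream("title", "Solr In Action")) {
          CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
          ts.reset();                    // required before the first incrementToken()
          while (ts.incrementToken()) {
            System.out.println(term.toString());
          }
          ts.end();                      // close() is handled by try-with-resources
        }
      }
    }

The parseFieldQuery() body is similarly replaced by QueryBuilder.createPhraseQuery(field, queryText), which encapsulates essentially the same token-buffering logic that was removed here.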

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/TrieField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/TrieField.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/TrieField.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/schema/TrieField.java Mon Oct 21 18:58:24 2013
@@ -24,8 +24,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 
-import org.apache.lucene.analysis.util.CharFilterFactory;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
 import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FieldType.NumericType;
@@ -51,8 +49,6 @@ import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.mutable.MutableValueDate;
 import org.apache.lucene.util.mutable.MutableValueLong;
-import org.apache.solr.analysis.TokenizerChain;
-import org.apache.solr.analysis.TrieTokenizerFactory;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
@@ -111,12 +107,6 @@ public class TrieField extends Primitive
                 "Invalid type specified in schema.xml for field: " + args.get("name"), e);
       }
     }
-
-    CharFilterFactory[] filterFactories = new CharFilterFactory[0];
-    TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
-    analyzer = new TokenizerChain(filterFactories, new TrieTokenizerFactory(type, precisionStep), tokenFilterFactories);
-    // for query time we only need one token, so we use the biggest possible precisionStep:
-    queryAnalyzer = new TokenizerChain(filterFactories, new TrieTokenizerFactory(type, Integer.MAX_VALUE), tokenFilterFactories);
   }
 
   @Override
@@ -223,7 +213,7 @@ public class TrieField extends Primitive
 
   @Override
   public boolean isTokenized() {
-    return true;
+    return false;
   }
 
   @Override
@@ -382,24 +372,29 @@ public class TrieField extends Primitive
   @Override
   public void readableToIndexed(CharSequence val, BytesRef result) {
     String s = val.toString();
-    switch (type) {
-      case INTEGER:
-        NumericUtils.intToPrefixCodedBytes(Integer.parseInt(s), 0, result);
-        break;
-      case FLOAT:
-        NumericUtils.intToPrefixCodedBytes(NumericUtils.floatToSortableInt(Float.parseFloat(s)), 0, result);
-        break;
-      case LONG:
-        NumericUtils.longToPrefixCodedBytes(Long.parseLong(s), 0, result);
-        break;
-      case DOUBLE:
-        NumericUtils.longToPrefixCodedBytes(NumericUtils.doubleToSortableLong(Double.parseDouble(s)), 0, result);
-        break;
-      case DATE:
-        NumericUtils.longToPrefixCodedBytes(dateField.parseMath(null, s).getTime(), 0, result);
-        break;
-      default:
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + type);
+    try {
+      switch (type) {
+        case INTEGER:
+          NumericUtils.intToPrefixCodedBytes(Integer.parseInt(s), 0, result);
+          break;
+        case FLOAT:
+          NumericUtils.intToPrefixCodedBytes(NumericUtils.floatToSortableInt(Float.parseFloat(s)), 0, result);
+          break;
+        case LONG:
+          NumericUtils.longToPrefixCodedBytes(Long.parseLong(s), 0, result);
+          break;
+        case DOUBLE:
+          NumericUtils.longToPrefixCodedBytes(NumericUtils.doubleToSortableLong(Double.parseDouble(s)), 0, result);
+          break;
+        case DATE:
+          NumericUtils.longToPrefixCodedBytes(dateField.parseMath(null, s).getTime(), 0, result);
+          break;
+        default:
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + type);
+      }
+    } catch (NumberFormatException nfe) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
+                              "Invalid Number: " + val);
     }
   }
 
@@ -696,9 +691,6 @@ public class TrieField extends Primitive
 
   @Override
   public void checkSchemaField(final SchemaField field) {
-    if (field.hasDocValues() && !field.multiValued() && !(field.isRequired() || field.getDefaultValue() != null)) {
-      throw new IllegalStateException("Field " + this + " has single-valued doc values enabled, but has no default value and is not required");
-    }
   }
 }
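
A small, self-contained illustration (class name invented) of the readableToIndexed() behavior above, assuming Lucene 4.x NumericUtils: the value is prefix-coded at shift 0, and a malformed number now surfaces as a client error rather than an unhandled NumberFormatException:

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.NumericUtils;

    public class PrefixCodedDemo {
      public static void main(String[] args) {
        BytesRef result = new BytesRef();
        String s = "forty-two";                     // malformed on purpose
        try {
          NumericUtils.intToPrefixCodedBytes(Integer.parseInt(s), 0, result);
          System.out.println(result);
        } catch (NumberFormatException nfe) {
          // TrieField now wraps this in a SolrException with ErrorCode.BAD_REQUEST
          System.err.println("Invalid Number: " + s);
        }
      }
    }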
 

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollector.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollector.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollector.java Mon Oct 21 18:58:24 2013
@@ -29,11 +29,14 @@ import org.apache.lucene.search.Scorer;
  * </p>
  */
 public class EarlyTerminatingCollector extends Collector {
-  private int numCollected;
-  private int lastDocId = -1;
-  private int maxDocsToCollect;
-  private Collector delegate;
-  
+
+  private final int maxDocsToCollect;
+  private final Collector delegate;
+
+  private int numCollected = 0;
+  private int prevReaderCumulativeSize = 0;
+  private int currentReaderSize = 0;  
+
   /**
    * <p>
    *  Wraps a {@link Collector}, throwing {@link EarlyTerminatingCollectorException} 
@@ -44,42 +47,40 @@ public class EarlyTerminatingCollector e
    * 
    */
   public EarlyTerminatingCollector(Collector delegate, int maxDocsToCollect) {
+    assert 0 < maxDocsToCollect;
+    assert null != delegate;
+
     this.delegate = delegate;
     this.maxDocsToCollect = maxDocsToCollect;
   }
 
+  /**
+   * This collector requires that docs be collected in order, otherwise
+   * the computed number of scanned docs in the resulting 
+   * {@link EarlyTerminatingCollectorException} will be meaningless.
+   */
   @Override
   public boolean acceptsDocsOutOfOrder() {
-    return delegate.acceptsDocsOutOfOrder();
+    return false;
   }
 
   @Override
   public void collect(int doc) throws IOException {
     delegate.collect(doc);
-    lastDocId = doc;    
     numCollected++;  
-    if(numCollected==maxDocsToCollect) {
-      throw new EarlyTerminatingCollectorException(numCollected, lastDocId);
+    if(maxDocsToCollect <= numCollected) {
+      throw new EarlyTerminatingCollectorException
+        (numCollected, prevReaderCumulativeSize + (doc + 1));
     }
   }
   @Override
   public void setNextReader(AtomicReaderContext context) throws IOException {
-    delegate.setNextReader(context);    
+    prevReaderCumulativeSize += currentReaderSize; // not current any more
+    currentReaderSize = context.reader().maxDoc() - 1;
+    delegate.setNextReader(context);
   }
   @Override
   public void setScorer(Scorer scorer) throws IOException {
     delegate.setScorer(scorer);    
   }
-  public int getNumCollected() {
-    return numCollected;
-  }
-  public void setNumCollected(int numCollected) {
-    this.numCollected = numCollected;
-  }
-  public int getLastDocId() {
-    return lastDocId;
-  }
-  public void setLastDocId(int lastDocId) {
-    this.lastDocId = lastDocId;
-  }
 }
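
A hedged usage sketch (the helper class is hypothetical, not part of this commit) of the reworked collector: cap collection at maxDocsToCollect and, if it terminates early, use getNumberScanned() to extrapolate an approximate total hit count:

    import java.io.IOException;
    import org.apache.lucene.search.Collector;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.solr.search.EarlyTerminatingCollector;
    import org.apache.solr.search.EarlyTerminatingCollectorException;

    public class CappedSearch {
      /** Returns an estimated hit count, or -1 if the cap was never reached. */
      static long searchWithCap(IndexSearcher searcher, Query query, Collector delegate, int cap)
          throws IOException {
        try {
          searcher.search(query, new EarlyTerminatingCollector(delegate, cap));
          return -1;                    // finished normally; exact counts live in the delegate
        } catch (EarlyTerminatingCollectorException e) {
          // rough extrapolation: collected/scanned ratio applied to the whole index
          return (long) e.getNumberCollected() * searcher.getIndexReader().maxDoc() / e.getNumberScanned();
        }
      }
    }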

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollectorException.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollectorException.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollectorException.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/EarlyTerminatingCollectorException.java Mon Oct 21 18:58:24 2013
@@ -23,23 +23,40 @@ package org.apache.solr.search;
  */
 public class EarlyTerminatingCollectorException extends RuntimeException {
   private static final long serialVersionUID = 5939241340763428118L;  
-  private int lastDocId = -1;
+  private int numberScanned;
   private int numberCollected;
   
-  public EarlyTerminatingCollectorException(int numberCollected, int lastDocId) {
+  public EarlyTerminatingCollectorException(int numberCollected, int numberScanned) {
+    assert numberCollected <= numberScanned : numberCollected+"<="+numberScanned;
+    assert 0 < numberCollected;
+    assert 0 < numberScanned;
+
     this.numberCollected = numberCollected;
-    this.lastDocId = lastDocId;
+    this.numberScanned = numberScanned;
   }
-  public int getLastDocId() {
-    return lastDocId;
-  }
-  public void setLastDocId(int lastDocId) {
-    this.lastDocId = lastDocId;
+  /**
+   * The total number of documents in the index that were "scanned" by 
+   * the collector when collecting the {@link #getNumberCollected()} documents 
+   * that triggered this exception.
+   * <p>
+   * This number represents the sum of:
+   * </p>
+   * <ul>
+   *  <li>The total number of documents in all AtomicReaders
+   *      that were fully exhausted during collection
+   *  </li>
+   *  <li>The id of the last doc collected in the last AtomicReader
+   *      consulted during collection.
+   *  </li>
+   * </ul>
+   **/
+  public int getNumberScanned() {
+    return numberScanned;
   }
+  /**
+   * The number of documents collected that resulted in early termination
+   */
   public int getNumberCollected() {
     return numberCollected;
   }
-  public void setNumberCollected(int numberCollected) {
-    this.numberCollected = numberCollected;
-  }
 }

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/QueryResultKey.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/QueryResultKey.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/QueryResultKey.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/QueryResultKey.java Mon Oct 21 18:58:24 2013
@@ -90,9 +90,36 @@ public final class QueryResultKey {
   }
 
 
-  private static boolean isEqual(Object o1, Object o2) {
-    if (o1==o2) return true;  // takes care of identity and null cases
-    if (o1==null || o2==null) return false;
-    return o1.equals(o2);
+  // Fast path: expect the filter lists to be ordered, and only fall back to an
+  // unordered compare at the first non-equal element.
+  // This is only called when the hash code of the entire key already matched,
+  // so the slower unorderedCompare should almost never run as long as filter
+  // lists are generally ordered.
+  private static boolean isEqual(List<Query> fqList1, List<Query> fqList2) {
+    if (fqList1 == fqList2) return true;  // takes care of identity and null cases
+    if (fqList1 == null || fqList2 == null) return false;
+    int sz = fqList1.size();
+    if (sz != fqList2.size()) return false;
+    for (int i = 0; i < sz; i++) {
+      if (!fqList1.get(i).equals(fqList2.get(i))) {
+        return unorderedCompare(fqList1, fqList2, i);
+      }
+    }
+    return true;
   }
+
+  private static boolean unorderedCompare(List<Query> fqList1, List<Query> fqList2, int start) {
+    int sz = fqList1.size();
+    outer:
+    for (int i = start; i < sz; i++) {
+      Query q1 = fqList1.get(i);
+      for (int j = start; j < sz; j++) {
+        if (q1.equals(fqList2.get(j)))
+          continue outer;
+      }
+      return false;
+    }
+    return true;
+  }
+
 }
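
A generic toy version (invented helper, not Solr API) of the same comparison strategy: positional equality first, then a one-directional scan from the first mismatch, which suffices here because both lists have the same size and filter lists rarely repeat an element:

    import java.util.List;

    public class FilterListCompare {
      static <T> boolean fastListEquals(List<T> a, List<T> b) {
        if (a == b) return true;
        if (a == null || b == null || a.size() != b.size()) return false;
        for (int i = 0; i < a.size(); i++) {
          if (!a.get(i).equals(b.get(i))) {
            // fall back to an order-insensitive check over the remaining elements
            return b.subList(i, b.size()).containsAll(a.subList(i, a.size()));
          }
        }
        return true;
      }
    }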

Modified: lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java?rev=1534320&r1=1534319&r2=1534320&view=diff
==============================================================================
--- lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java (original)
+++ lucene/dev/branches/lucene4956/solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java Mon Oct 21 18:58:24 2013
@@ -22,6 +22,7 @@ import org.apache.lucene.search.Constant
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
 import org.apache.lucene.search.join.ScoreMode;
 import org.apache.lucene.search.join.ToParentBlockJoinQuery;
 import org.apache.solr.common.params.SolrParams;
@@ -86,8 +87,7 @@ class BlockJoinParentQParser extends QPa
   }
 
   protected Filter createParentFilter(Query parentQ) {
-    return new CachingWrapperFilter(new QueryWrapperFilter(parentQ)) {
-    };
+    return new FixedBitSetCachingWrapperFilter(new QueryWrapperFilter(parentQ));
   }
 }
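
A minimal sketch of where such a parent filter ends up (assuming the Lucene 4.x join module API; the field names and demo class are invented): the cached, FixedBitSet-backed filter identifies the parent documents for a ToParentBlockJoinQuery:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.QueryWrapperFilter;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.join.FixedBitSetCachingWrapperFilter;
    import org.apache.lucene.search.join.ScoreMode;
    import org.apache.lucene.search.join.ToParentBlockJoinQuery;

    public class BlockJoinDemo {
      public static Query parentsOfRedChildren() {
        Filter parents = new FixedBitSetCachingWrapperFilter(
            new QueryWrapperFilter(new TermQuery(new Term("doc_type", "parent"))));
        Query children = new TermQuery(new Term("color", "red"));
        return new ToParentBlockJoinQuery(children, parents, ScoreMode.None);
      }
    }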