Posted to commits@lucene.apache.org by rm...@apache.org on 2010/10/20 16:51:14 UTC

svn commit: r1025606 [4/8] - in /lucene/dev/branches/branch_3x: ./ lucene/ lucene/backwards/src/ lucene/backwards/src/test/org/apache/lucene/analysis/ lucene/backwards/src/test/org/apache/lucene/document/ lucene/backwards/src/test/org/apache/lucene/ind...

Modified: lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java?rev=1025606&r1=1025605&r2=1025606&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java (original)
+++ lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java Wed Oct 20 14:51:12 2010
@@ -1,1292 +1,57 @@
-/* Generated By:JavaCC: Do not edit this line. PrecedenceQueryParser.java */
 package org.apache.lucene.queryParser.precedence;
 
-import java.io.IOException;
-import java.io.StringReader;
-import java.text.DateFormat;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.Locale;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.document.DateTools;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.FuzzyQuery;
-import org.apache.lucene.search.MultiPhraseQuery;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermRangeQuery;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.queryParser.precedence.processors.PrecedenceQueryNodeProcessorPipeline;
+import org.apache.lucene.queryParser.standard.StandardQueryParser;
+import org.apache.lucene.queryParser.standard.processors.StandardQueryNodeProcessorPipeline;
 
 /**
- * Experimental query parser variant designed to handle operator precedence
- * in a more sensible fashion than QueryParser.  There are still some
- * open issues with this parser. The following tests are currently failing
- * in TestPrecedenceQueryParser and are disabled to make this test pass:
- * <ul>
- * <li> testSimple
- * <li> testWildcard
- * <li> testPrecedence
- * </ul>
- *
- * This class is generated by JavaCC.  The only method that clients should need
- * to call is {@link #parse(String)}.
- *
- * The syntax for query strings is as follows:
- * A Query is a series of clauses.
- * A clause may be prefixed by:
- * <ul>
- * <li> a plus (<code>+</code>) or a minus (<code>-</code>) sign, indicating
- * that the clause is required or prohibited respectively; or
- * <li> a term followed by a colon, indicating the field to be searched.
- * This enables one to construct queries which search multiple fields.
- * </ul>
- *
- * A clause may be either:
- * <ul>
- * <li> a term, indicating all the documents that contain this term; or
- * <li> a nested query, enclosed in parentheses.  Note that this may be used
- * with a <code>+</code>/<code>-</code> prefix to require any of a set of
- * terms.
- * </ul>
- *
- * Thus, in BNF, the query grammar is:
- * <pre>
- *   Query  ::= ( Clause )*
- *   Clause ::= ["+", "-"] [&lt;TERM&gt; ":"] ( &lt;TERM&gt; | "(" Query ")" )
- * </pre>
- *
  * <p>
- * Examples of appropriately formatted queries can be found in the <a
- * href="../../../../../../../queryparsersyntax.html">query syntax
- * documentation</a>.
+ * This query parser works exactly like the standard query parser ({@link StandardQueryParser}),
+ * except that it respects boolean operator precedence, so &lt;a AND b OR c AND d&gt; is parsed to
+ * &lt;(+a +b) (+c +d)&gt; instead of &lt;+a +b +c +d&gt;.
  * </p>
+ * <p>
+ * EXPERT: This class extends {@link StandardQueryParser}, but uses {@link PrecedenceQueryNodeProcessorPipeline}
+ * instead of {@link StandardQueryNodeProcessorPipeline} to process the query tree.
+ * </p>
+ * 
+ * @see StandardQueryParser
  */
-public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
-
-  private static final int CONJ_NONE   = 0;
-  private static final int CONJ_AND    = 1;
-  private static final int CONJ_OR     = 2;
-
-  private static final int MOD_NONE    = 0;
-  private static final int MOD_NOT     = 10;
-  private static final int MOD_REQ     = 11;
-
-  // make it possible to call setDefaultOperator() without accessing
-  // the nested class:
-  public static final Operator AND_OPERATOR = Operator.AND;
-  public static final Operator OR_OPERATOR = Operator.OR;
-
-  /** The actual operator that parser uses to combine query terms */
-  private Operator operator = OR_OPERATOR;
-
-  boolean lowercaseExpandedTerms = true;
-
-  Analyzer analyzer;
-  String field;
-  int phraseSlop = 0;
-  float fuzzyMinSim = FuzzyQuery.defaultMinSimilarity;
-  int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength;
-  Locale locale = Locale.getDefault();
-
-  static enum Operator { OR, AND }
-
-  /** Constructs a query parser.
-   *  @param f  the default field for query terms.
-   *  @param a   used to find terms in the query text.
-   */
-  public PrecedenceQueryParser(String f, Analyzer a) {
-    this(new FastCharStream(new StringReader("")));
-    analyzer = a;
-    field = f;
-  }
-
-  /** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
-   *  @param expression  the query string to be parsed.
-   *  @throws ParseException if the parsing fails
-   */
-  public Query parse(String expression) throws ParseException {
-    // optimize empty query to be empty BooleanQuery
-    if (expression == null || expression.trim().length() == 0) {
-      return new BooleanQuery();
-    }
-
-    ReInit(new FastCharStream(new StringReader(expression)));
-    try {
-      Query query = Query(field);
-      return (query != null) ? query : new BooleanQuery();
-    }
-    catch (TokenMgrError tme) {
-      throw new ParseException(tme.getMessage());
-    }
-    catch (BooleanQuery.TooManyClauses tmc) {
-      throw new ParseException("Too many boolean clauses");
-    }
-  }
-
-   /**
-   * @return Returns the analyzer.
-   */
-  public Analyzer getAnalyzer() {
-    return analyzer;
-  }
-
-  /**
-   * @return Returns the field.
-   */
-  public String getField() {
-    return field;
-  }
-
-   /**
-   * Get the minimal similarity for fuzzy queries.
-   */
-  public float getFuzzyMinSim() {
-      return fuzzyMinSim;
-  }
-
-  /**
-   * Set the minimum similarity for fuzzy queries.
-   * Default is 0.5f.
-   */
-  public void setFuzzyMinSim(float fuzzyMinSim) {
-      this.fuzzyMinSim = fuzzyMinSim;
-  }
-
-   /**
-   * Get the prefix length for fuzzy queries. 
-   * @return Returns the fuzzyPrefixLength.
-   */
-  public int getFuzzyPrefixLength() {
-    return fuzzyPrefixLength;
-  }
-
-  /**
-   * Set the prefix length for fuzzy queries. Default is 0.
-   * @param fuzzyPrefixLength The fuzzyPrefixLength to set.
-   */
-  public void setFuzzyPrefixLength(int fuzzyPrefixLength) {
-    this.fuzzyPrefixLength = fuzzyPrefixLength;
-  }
-
-  /**
-   * Sets the default slop for phrases.  If zero, then exact phrase matches
-   * are required.  Default value is zero.
-   */
-  public void setPhraseSlop(int phraseSlop) {
-    this.phraseSlop = phraseSlop;
-  }
-
-  /**
-   * Gets the default slop for phrases.
-   */
-  public int getPhraseSlop() {
-    return phraseSlop;
-  }
-
-  /**
-   * Sets the boolean operator of the QueryParser.
-   * In default mode (<code>OR_OPERATOR</code>) terms without any modifiers
-   * are considered optional: for example <code>capital of Hungary</code> is equal to
-   * <code>capital OR of OR Hungary</code>.<br/>
-   * In <code>AND_OPERATOR</code> mode terms are considered to be in conjunction: the
-   * above mentioned query is parsed as <code>capital AND of AND Hungary</code>
-   */
-  public void setDefaultOperator(Operator op) {
-    this.operator = op;
-  }
-
-  /**
-   * Gets implicit operator setting, which will be either AND_OPERATOR
-   * or OR_OPERATOR.
-   */
-  public Operator getDefaultOperator() {
-    return operator;
-  }
-
-  /**
-   * Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
-   * lower-cased or not.  Default is <code>true</code>.
-   */
-  public void setLowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
-    this.lowercaseExpandedTerms = lowercaseExpandedTerms;
-  }
-
-  /**
-   * @see #setLowercaseExpandedTerms(boolean)
-   */
-  public boolean getLowercaseExpandedTerms() {
-    return lowercaseExpandedTerms;
-  }
-
-  /**
-   * Set locale used by date range parsing.
-   */
-  public void setLocale(Locale locale) {
-    this.locale = locale;
-  }
-
-  /**
-   * Returns current locale, allowing access by subclasses.
-   */
-  public Locale getLocale() {
-    return locale;
-  }
-
-  protected void addClause(List<BooleanClause> clauses, int conj, int modifier, Query q) {
-    boolean required, prohibited;
-
-    // If this term is introduced by AND, make the preceding term required,
-    // unless it's already prohibited
-    if (clauses.size() > 0 && conj == CONJ_AND) {
-      BooleanClause c = clauses.get(clauses.size()-1);
-      if (!c.isProhibited())
-        c.setOccur(BooleanClause.Occur.MUST);
-    }
-
-    if (clauses.size() > 0 && operator == AND_OPERATOR && conj == CONJ_OR) {
-      // If this term is introduced by OR, make the preceding term optional,
-      // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
-      // notice if the input is a OR b, first term is parsed as required; without
-      // this modification a OR b would parsed as +a OR b
-      BooleanClause c = clauses.get(clauses.size()-1);
-      if (!c.isProhibited())
-        c.setOccur(BooleanClause.Occur.SHOULD);
-    }
-
-    // We might have been passed a null query; the term might have been
-    // filtered away by the analyzer.
-    if (q == null)
-      return;
-
-    if (operator == OR_OPERATOR) {
-      // We set REQUIRED if we're introduced by AND or +; PROHIBITED if
-      // introduced by NOT or -; make sure not to set both.
-      prohibited = (modifier == MOD_NOT);
-      required = (modifier == MOD_REQ);
-      if (conj == CONJ_AND && !prohibited) {
-        required = true;
-      }
-    } else {
-      // We set PROHIBITED if we're introduced by NOT or -; We set REQUIRED
-      // if not PROHIBITED and not introduced by OR
-      prohibited = (modifier == MOD_NOT);
-      required   = (!prohibited && conj != CONJ_OR);
-    }
-    if (required && !prohibited)
-      clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
-    else if (!required && !prohibited)
-      clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
-    else if (!required && prohibited)
-      clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
-    else
-      throw new RuntimeException("Clause cannot be both required and prohibited");
-  }
-
-  /**
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getFieldQuery(String field, String queryText, boolean quoted)  throws ParseException {
-    // Use the analyzer to get all the tokens, and then build a TermQuery,
-    // PhraseQuery, or nothing based on the term count
-
-    TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
-    List<AttributeSource.State> list = new ArrayList<AttributeSource.State>();
-    int positionCount = 0;
-    boolean severalTokensAtSamePosition = false;
-    CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
-    PositionIncrementAttribute posincrAtt = source.addAttribute(PositionIncrementAttribute.class);
-
-    try {
-      while (source.incrementToken()) {
-        list.add(source.captureState());
-        if (posincrAtt.getPositionIncrement() == 1)
-          positionCount++;
-        else
-          severalTokensAtSamePosition = true;
-      }
-      source.end();
-      source.close();
-    } catch (IOException e) {
-      // ignore, should never happen for StringReaders
-    }
-
-    if (list.size() == 0)
-      return null;
-    else if (list.size() == 1) {
-      source.restoreState(list.get(0));
-      return new TermQuery(new Term(field, termAtt.toString()));
-    } else {
-      if (severalTokensAtSamePosition || !quoted) {
-        if (positionCount == 1 || !quoted) {
-          // no phrase query:
-          BooleanQuery q = new BooleanQuery(positionCount == 1);
-
-          BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
-            BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
-
-          for (int i = 0; i < list.size(); i++) {
-            source.restoreState(list.get(i));
-            TermQuery currentQuery = new TermQuery(
-                new Term(field, termAtt.toString()));
-            q.add(currentQuery, occur);
-          }
-          return q;
-        }
-        else {
-          // phrase query:
-          MultiPhraseQuery mpq = new MultiPhraseQuery();
-          List<Term> multiTerms = new ArrayList<Term>();
-          for (int i = 0; i < list.size(); i++) {
-            source.restoreState(list.get(i));
-            if (posincrAtt.getPositionIncrement() == 1 && multiTerms.size() > 0) {
-              mpq.add(multiTerms.toArray(new Term[0]));
-              multiTerms.clear();
-            }
-            multiTerms.add(new Term(field, termAtt.toString()));
-          }
-          mpq.add(multiTerms.toArray(new Term[0]));
-          return mpq;
-        }
-      }
-      else {
-        PhraseQuery q = new PhraseQuery();
-        q.setSlop(phraseSlop);
-        for (int i = 0; i < list.size(); i++) {
-          source.restoreState(list.get(i));
-          q.add(new Term(field, termAtt.toString()));
-        }
-        return q;
-      }
-    }
-  }
-
-  /**
-   * Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}.
-   * This method may be overridden, for example, to return
-   * a SpanNearQuery instead of a PhraseQuery.
-   *
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getFieldQuery(String field, String queryText, int slop)
-        throws ParseException {
-    Query query = getFieldQuery(field, queryText, true);
-
-    if (query instanceof PhraseQuery) {
-      ((PhraseQuery) query).setSlop(slop);
-    }
-    if (query instanceof MultiPhraseQuery) {
-      ((MultiPhraseQuery) query).setSlop(slop);
-    }
-
-    return query;
-  }
-
-  /**
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getRangeQuery(String field,
-                                String part1,
-                                String part2,
-                                boolean inclusive) throws ParseException
-  {
-    if (lowercaseExpandedTerms) {
-      part1 = part1.toLowerCase();
-      part2 = part2.toLowerCase();
-    }
-    try {
-      DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, locale);
-      df.setLenient(true);
-      Date d1 = df.parse(part1);
-      Date d2 = df.parse(part2);
-      part1 = DateTools.dateToString(d1, DateTools.Resolution.DAY);
-      part2 = DateTools.dateToString(d2, DateTools.Resolution.DAY);
-    }
-    catch (Exception e) { }
-
-    return new TermRangeQuery(field, part1, part2, inclusive, inclusive);
-  }
-
-  /**
-   * Factory method for generating query, given a set of clauses.
-   * By default creates a boolean query composed of clauses passed in.
-   *
-   * Can be overridden by extending classes, to modify query being
-   * returned.
-   *
-   * @param clauses List that contains {@link BooleanClause} instances
-   *    to join.
-   *
-   * @return Resulting {@link Query} object.
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException
-  {
-    return getBooleanQuery(clauses, false);
-  }
-
-  /**
-   * Factory method for generating query, given a set of clauses.
-   * By default creates a boolean query composed of clauses passed in.
-   *
-   * Can be overridden by extending classes, to modify query being
-   * returned.
-   *
-   * @param clauses List that contains {@link BooleanClause} instances
-   *    to join.
-   * @param disableCoord true if coord scoring should be disabled.
-   *
-   * @return Resulting {@link Query} object.
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord)
-      throws ParseException {
-    if (clauses == null || clauses.size() == 0)
-      return null;
-
-    BooleanQuery query = new BooleanQuery(disableCoord);
-    for (int i = 0; i < clauses.size(); i++) {
-      query.add(clauses.get(i));
-    }
-    return query;
-  }
-
-  /**
-   * Factory method for generating a query. Called when parser
-   * parses an input term token that contains one or more wildcard
-   * characters (? and *), but is not a prefix term token (one
-   * that has just a single * character at the end)
-   *<p>
-   * Depending on settings, prefix term may be lower-cased
-   * automatically. It will not go through the default Analyzer,
-   * however, since normal Analyzers are unlikely to work properly
-   * with wildcard templates.
-   *<p>
-   * Can be overridden by extending classes, to provide custom handling for
-   * wildcard queries, which may be necessary due to missing analyzer calls.
-   *
-   * @param field Name of the field query will use.
-   * @param termStr Term token that contains one or more wild card
-   *   characters (? or *), but is not simple prefix term
-   *
-   * @return Resulting {@link Query} built for the term
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getWildcardQuery(String field, String termStr) throws ParseException
-  {
-    if (lowercaseExpandedTerms) {
-      termStr = termStr.toLowerCase();
-    }
-    Term t = new Term(field, termStr);
-    return new WildcardQuery(t);
-  }
-
-  /**
-   * Factory method for generating a query (similar to
-   * {@link #getWildcardQuery}). Called when parser parses an input term
-   * token that uses prefix notation; that is, contains a single '*' wildcard
-   * character as its last character. Since this is a special case
-   * of generic wildcard term, and such a query can be optimized easily,
-   * this usually results in a different query object.
-   *<p>
-   * Depending on settings, a prefix term may be lower-cased
-   * automatically. It will not go through the default Analyzer,
-   * however, since normal Analyzers are unlikely to work properly
-   * with wildcard templates.
-   *<p>
-   * Can be overridden by extending classes, to provide custom handling for
-   * wild card queries, which may be necessary due to missing analyzer calls.
-   *
-   * @param field Name of the field query will use.
-   * @param termStr Term token to use for building term for the query
-   *    (<b>without</b> trailing '*' character!)
-   *
-   * @return Resulting {@link Query} built for the term
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getPrefixQuery(String field, String termStr) throws ParseException
-  {
-    if (lowercaseExpandedTerms) {
-      termStr = termStr.toLowerCase();
-    }
-    Term t = new Term(field, termStr);
-    return new PrefixQuery(t);
-  }
-
-   /**
-   * Factory method for generating a query (similar to
-   * {@link #getWildcardQuery}). Called when parser parses
-   * an input term token that has the fuzzy suffix (~) appended.
-   *
-   * @param field Name of the field query will use.
-   * @param termStr Term token to use for building term for the query
-   *
-   * @return Resulting {@link Query} built for the term
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
-  {
-    if (lowercaseExpandedTerms) {
-      termStr = termStr.toLowerCase();
-    }
-    Term t = new Term(field, termStr);
-    return new FuzzyQuery(t, minSimilarity, fuzzyPrefixLength);
-  }
-
+public class PrecedenceQueryParser extends StandardQueryParser {
+  
   /**
-   * Returns a String where the escape char has been
-   * removed, or kept only once if there was a double escape.
+   * @see StandardQueryParser#StandardQueryParser()
    */
-  private String discardEscapeChar(String input) {
-    char[] caSource = input.toCharArray();
-    char[] caDest = new char[caSource.length];
-    int j = 0;
-    for (int i = 0; i < caSource.length; i++) {
-      if ((caSource[i] != '\\') || (i > 0 && caSource[i-1] == '\\')) {
-        caDest[j++]=caSource[i];
-      }
-    }
-    return new String(caDest, 0, j);
+  public PrecedenceQueryParser() {
+    setQueryNodeProcessor(new PrecedenceQueryNodeProcessorPipeline(getQueryConfigHandler()));
   }
-
+  
   /**
-   * Returns a String where those characters that QueryParser
-   * expects to be escaped are escaped by a preceding <code>\</code>.
+   * @see StandardQueryParser#StandardQueryParser(Analyzer)
    */
-  public static String escape(String s) {
-    StringBuffer sb = new StringBuffer();
-    for (int i = 0; i < s.length(); i++) {
-      char c = s.charAt(i);
-      // NOTE: keep this in sync with _ESCAPED_CHAR below!
-      if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')' || c == ':'
-        || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~'
-        || c == '*' || c == '?') {
-        sb.append('\\');
-      }
-      sb.append(c);
-    }
-    return sb.toString();
-  }
-
-  /**
-   * Command line tool to test QueryParser, using {@link org.apache.lucene.analysis.SimpleAnalyzer}.
-   * Usage:<br>
-   * <code>java org.apache.lucene.queryParser.QueryParser &lt;input&gt;</code>
-   */
-  public static void main(String[] args) throws Exception {
-    if (args.length == 0) {
-      System.out.println("Usage: java org.apache.lucene.queryParser.QueryParser <input>");
-      System.exit(0);
-    }
-    PrecedenceQueryParser qp = new PrecedenceQueryParser("field",
-                           new org.apache.lucene.analysis.SimpleAnalyzer());
-    Query q = qp.parse(args[0]);
-    System.out.println(q.toString("field"));
-  }
-
-// *   Query  ::= ( Clause )*
-// *   Clause ::= ["+", "-"] [<TERM> ":"] ( <TERM> | "(" Query ")" )
-  final public int Conjunction() throws ParseException {
-  int ret = CONJ_NONE;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-    case AND:
-    case OR:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case AND:
-        jj_consume_token(AND);
-            ret = CONJ_AND;
-        break;
-      case OR:
-        jj_consume_token(OR);
-              ret = CONJ_OR;
-        break;
-      default:
-        jj_la1[0] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      break;
-    default:
-      jj_la1[1] = jj_gen;
-      ;
-    }
-    {if (true) return ret;}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public int Modifier() throws ParseException {
-  int ret = MOD_NONE;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-    case NOT:
-    case PLUS:
-    case MINUS:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case PLUS:
-        jj_consume_token(PLUS);
-              ret = MOD_REQ;
-        break;
-      case MINUS:
-        jj_consume_token(MINUS);
-                 ret = MOD_NOT;
-        break;
-      case NOT:
-        jj_consume_token(NOT);
-               ret = MOD_NOT;
-        break;
-      default:
-        jj_la1[2] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      break;
-    default:
-      jj_la1[3] = jj_gen;
-      ;
-    }
-    {if (true) return ret;}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public Query Query(String field) throws ParseException {
-  List<BooleanClause> clauses = new ArrayList<BooleanClause>();
-  Query q, firstQuery=null;
-  boolean orPresent = false;
-  int modifier;
-    modifier = Modifier();
-    q = andExpression(field);
-    addClause(clauses, CONJ_NONE, modifier, q);
-    if (modifier == MOD_NONE)
-      firstQuery = q;
-    label_1:
-    while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case OR:
-      case NOT:
-      case PLUS:
-      case MINUS:
-      case LPAREN:
-      case QUOTED:
-      case TERM:
-      case PREFIXTERM:
-      case WILDTERM:
-      case RANGEIN_START:
-      case RANGEEX_START:
-      case NUMBER:
-        ;
-        break;
-      default:
-        jj_la1[4] = jj_gen;
-        break label_1;
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case OR:
-        jj_consume_token(OR);
-            orPresent=true;
-        break;
-      default:
-        jj_la1[5] = jj_gen;
-        ;
-      }
-      modifier = Modifier();
-      q = andExpression(field);
-      addClause(clauses, orPresent ? CONJ_OR : CONJ_NONE, modifier, q);
-    }
-      if (clauses.size() == 1 && firstQuery != null)
-        {if (true) return firstQuery;}
-      else {
-        {if (true) return getBooleanQuery(clauses);}
-      }
-    throw new Error("Missing return statement in function");
-  }
-
-  final public Query andExpression(String field) throws ParseException {
-  List<BooleanClause> clauses = new ArrayList<BooleanClause>();
-  Query q, firstQuery=null;
-  int modifier;
-    q = Clause(field);
-    addClause(clauses, CONJ_NONE, MOD_NONE, q);
-    firstQuery = q;
-    label_2:
-    while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case AND:
-        ;
-        break;
-      default:
-        jj_la1[6] = jj_gen;
-        break label_2;
-      }
-      jj_consume_token(AND);
-      modifier = Modifier();
-      q = Clause(field);
-      addClause(clauses, CONJ_AND, modifier, q);
-    }
-      if (clauses.size() == 1 && firstQuery != null)
-        {if (true) return firstQuery;}
-      else {
-        {if (true) return getBooleanQuery(clauses);}
-      }
-    throw new Error("Missing return statement in function");
-  }
-
-  final public Query Clause(String field) throws ParseException {
-  Query q;
-  Token fieldToken=null, boost=null;
-    if (jj_2_1(2)) {
-      fieldToken = jj_consume_token(TERM);
-      jj_consume_token(COLON);
-      field=discardEscapeChar(fieldToken.image);
-    } else {
-      ;
-    }
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-    case QUOTED:
-    case TERM:
-    case PREFIXTERM:
-    case WILDTERM:
-    case RANGEIN_START:
-    case RANGEEX_START:
-    case NUMBER:
-      q = Term(field);
-      break;
-    case LPAREN:
-      jj_consume_token(LPAREN);
-      q = Query(field);
-      jj_consume_token(RPAREN);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
-        jj_consume_token(CARAT);
-        boost = jj_consume_token(NUMBER);
-        break;
-      default:
-        jj_la1[7] = jj_gen;
-        ;
-      }
-      break;
-    default:
-      jj_la1[8] = jj_gen;
-      jj_consume_token(-1);
-      throw new ParseException();
-    }
-      if (boost != null) {
-        float f = (float)1.0;
-  try {
-    f = Float.valueOf(boost.image).floatValue();
-          q.setBoost(f);
-  } catch (Exception ignored) { }
-      }
-      {if (true) return q;}
-    throw new Error("Missing return statement in function");
-  }
-
-  final public Query Term(String field) throws ParseException {
-  Token term, boost=null, fuzzySlop=null, goop1, goop2;
-  boolean prefix = false;
-  boolean wildcard = false;
-  boolean fuzzy = false;
-  Query q;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-    case TERM:
-    case PREFIXTERM:
-    case WILDTERM:
-    case NUMBER:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case TERM:
-        term = jj_consume_token(TERM);
-        break;
-      case PREFIXTERM:
-        term = jj_consume_token(PREFIXTERM);
-                             prefix=true;
-        break;
-      case WILDTERM:
-        term = jj_consume_token(WILDTERM);
-                           wildcard=true;
-        break;
-      case NUMBER:
-        term = jj_consume_token(NUMBER);
-        break;
-      default:
-        jj_la1[9] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case FUZZY_SLOP:
-        fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                fuzzy=true;
-        break;
-      default:
-        jj_la1[10] = jj_gen;
-        ;
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
-        jj_consume_token(CARAT);
-        boost = jj_consume_token(NUMBER);
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case FUZZY_SLOP:
-          fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                                         fuzzy=true;
-          break;
-        default:
-          jj_la1[11] = jj_gen;
-          ;
-        }
-        break;
-      default:
-        jj_la1[12] = jj_gen;
-        ;
-      }
-       String termImage=discardEscapeChar(term.image);
-       if (wildcard) {
-       q = getWildcardQuery(field, termImage);
-       } else if (prefix) {
-         q = getPrefixQuery(field,
-           discardEscapeChar(term.image.substring
-          (0, term.image.length()-1)));
-       } else if (fuzzy) {
-          float fms = fuzzyMinSim;
-          try {
-            fms = Float.valueOf(fuzzySlop.image.substring(1)).floatValue();
-          } catch (Exception ignored) { }
-         if(fms < 0.0f || fms > 1.0f){
-           {if (true) throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !");}
-         }
-         q = getFuzzyQuery(field, termImage, fms);
-       } else {
-         q = getFieldQuery(field, termImage, false);
-       }
-      break;
-    case RANGEIN_START:
-      jj_consume_token(RANGEIN_START);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_GOOP:
-        goop1 = jj_consume_token(RANGEIN_GOOP);
-        break;
-      case RANGEIN_QUOTED:
-        goop1 = jj_consume_token(RANGEIN_QUOTED);
-        break;
-      default:
-        jj_la1[13] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_TO:
-        jj_consume_token(RANGEIN_TO);
-        break;
-      default:
-        jj_la1[14] = jj_gen;
-        ;
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_GOOP:
-        goop2 = jj_consume_token(RANGEIN_GOOP);
-        break;
-      case RANGEIN_QUOTED:
-        goop2 = jj_consume_token(RANGEIN_QUOTED);
-        break;
-      default:
-        jj_la1[15] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      jj_consume_token(RANGEIN_END);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
-        jj_consume_token(CARAT);
-        boost = jj_consume_token(NUMBER);
-        break;
-      default:
-        jj_la1[16] = jj_gen;
-        ;
-      }
-          if (goop1.kind == RANGEIN_QUOTED) {
-            goop1.image = goop1.image.substring(1, goop1.image.length()-1);
-          } else {
-            goop1.image = discardEscapeChar(goop1.image);
-          }
-          if (goop2.kind == RANGEIN_QUOTED) {
-            goop2.image = goop2.image.substring(1, goop2.image.length()-1);
-      } else {
-        goop2.image = discardEscapeChar(goop2.image);
-      }
-          q = getRangeQuery(field, goop1.image, goop2.image, true);
-      break;
-    case RANGEEX_START:
-      jj_consume_token(RANGEEX_START);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEEX_GOOP:
-        goop1 = jj_consume_token(RANGEEX_GOOP);
-        break;
-      case RANGEEX_QUOTED:
-        goop1 = jj_consume_token(RANGEEX_QUOTED);
-        break;
-      default:
-        jj_la1[17] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEEX_TO:
-        jj_consume_token(RANGEEX_TO);
-        break;
-      default:
-        jj_la1[18] = jj_gen;
-        ;
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEEX_GOOP:
-        goop2 = jj_consume_token(RANGEEX_GOOP);
-        break;
-      case RANGEEX_QUOTED:
-        goop2 = jj_consume_token(RANGEEX_QUOTED);
-        break;
-      default:
-        jj_la1[19] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      jj_consume_token(RANGEEX_END);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
-        jj_consume_token(CARAT);
-        boost = jj_consume_token(NUMBER);
-        break;
-      default:
-        jj_la1[20] = jj_gen;
-        ;
-      }
-          if (goop1.kind == RANGEEX_QUOTED) {
-            goop1.image = goop1.image.substring(1, goop1.image.length()-1);
-          } else {
-            goop1.image = discardEscapeChar(goop1.image);
-          }
-          if (goop2.kind == RANGEEX_QUOTED) {
-            goop2.image = goop2.image.substring(1, goop2.image.length()-1);
-      } else {
-        goop2.image = discardEscapeChar(goop2.image);
-      }
-
-          q = getRangeQuery(field, goop1.image, goop2.image, false);
-      break;
-    case QUOTED:
-      term = jj_consume_token(QUOTED);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case FUZZY_SLOP:
-        fuzzySlop = jj_consume_token(FUZZY_SLOP);
-        break;
-      default:
-        jj_la1[21] = jj_gen;
-        ;
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
-        jj_consume_token(CARAT);
-        boost = jj_consume_token(NUMBER);
-        break;
-      default:
-        jj_la1[22] = jj_gen;
-        ;
-      }
-         int s = phraseSlop;
-
-         if (fuzzySlop != null) {
-           try {
-             s = Float.valueOf(fuzzySlop.image.substring(1)).intValue();
-           }
-           catch (Exception ignored) { }
-         }
-         q = getFieldQuery(field, term.image.substring(1, term.image.length()-1), s);
-      break;
-    default:
-      jj_la1[23] = jj_gen;
-      jj_consume_token(-1);
-      throw new ParseException();
-    }
-    if (boost != null) {
-      float f = (float) 1.0;
-      try {
-        f = Float.valueOf(boost.image).floatValue();
-      }
-      catch (Exception ignored) {
-    /* Should this be handled somehow? (defaults to "no boost", if
-     * boost number is invalid)
-     */
-      }
-
-      // avoid boosting null queries, such as those caused by stop words
-      if (q != null) {
-        q.setBoost(f);
-      }
-    }
-    {if (true) return q;}
-    throw new Error("Missing return statement in function");
-  }
-
-  private boolean jj_2_1(int xla) {
-    jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_1(); }
-    catch(LookaheadSuccess ls) { return true; }
-    finally { jj_save(0, xla); }
-  }
-
-  private boolean jj_3_1() {
-    if (jj_scan_token(TERM)) return true;
-    if (jj_scan_token(COLON)) return true;
-    return false;
-  }
-
-  /** Generated Token Manager. */
-  public PrecedenceQueryParserTokenManager token_source;
-  /** Current token. */
-  public Token token;
-  /** Next token. */
-  public Token jj_nt;
-  private int jj_ntk;
-  private Token jj_scanpos, jj_lastpos;
-  private int jj_la;
-  private int jj_gen;
-  final private int[] jj_la1 = new int[24];
-  static private int[] jj_la1_0;
-  static {
-      jj_la1_init_0();
-   }
-   private static void jj_la1_init_0() {
-      jj_la1_0 = new int[] {0x180,0x180,0xe00,0xe00,0xfb1f00,0x100,0x80,0x8000,0xfb1000,0x9a0000,0x40000,0x40000,0x8000,0xc000000,0x1000000,0xc000000,0x8000,0xc0000000,0x10000000,0xc0000000,0x8000,0x40000,0x8000,0xfb0000,};
-   }
-  final private JJCalls[] jj_2_rtns = new JJCalls[1];
-  private boolean jj_rescan = false;
-  private int jj_gc = 0;
-
-  /** Constructor with user supplied CharStream. */
-  public PrecedenceQueryParser(CharStream stream) {
-    token_source = new PrecedenceQueryParserTokenManager(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 24; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
-  }
-
-  /** Reinitialise. */
-  public void ReInit(CharStream stream) {
-    token_source.ReInit(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 24; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
-  }
-
-  /** Constructor with generated Token Manager. */
-  public PrecedenceQueryParser(PrecedenceQueryParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 24; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
-  }
-
-  /** Reinitialise. */
-  public void ReInit(PrecedenceQueryParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 24; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
-  }
-
-  private Token jj_consume_token(int kind) throws ParseException {
-    Token oldToken;
-    if ((oldToken = token).next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    if (token.kind == kind) {
-      jj_gen++;
-      if (++jj_gc > 100) {
-        jj_gc = 0;
-        for (int i = 0; i < jj_2_rtns.length; i++) {
-          JJCalls c = jj_2_rtns[i];
-          while (c != null) {
-            if (c.gen < jj_gen) c.first = null;
-            c = c.next;
-          }
-        }
-      }
-      return token;
-    }
-    token = oldToken;
-    jj_kind = kind;
-    throw generateParseException();
-  }
-
-  static private final class LookaheadSuccess extends java.lang.Error { }
-  final private LookaheadSuccess jj_ls = new LookaheadSuccess();
-  private boolean jj_scan_token(int kind) {
-    if (jj_scanpos == jj_lastpos) {
-      jj_la--;
-      if (jj_scanpos.next == null) {
-        jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
-      } else {
-        jj_lastpos = jj_scanpos = jj_scanpos.next;
-      }
-    } else {
-      jj_scanpos = jj_scanpos.next;
-    }
-    if (jj_rescan) {
-      int i = 0; Token tok = token;
-      while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
-      if (tok != null) jj_add_error_token(kind, i);
-    }
-    if (jj_scanpos.kind != kind) return true;
-    if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
-    return false;
-  }
-
-
-/** Get the next Token. */
-  final public Token getNextToken() {
-    if (token.next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    jj_gen++;
-    return token;
-  }
-
-/** Get the specific Token. */
-  final public Token getToken(int index) {
-    Token t = token;
-    for (int i = 0; i < index; i++) {
-      if (t.next != null) t = t.next;
-      else t = t.next = token_source.getNextToken();
-    }
-    return t;
-  }
-
-  private int jj_ntk() {
-    if ((jj_nt=token.next) == null)
-      return (jj_ntk = (token.next=token_source.getNextToken()).kind);
-    else
-      return (jj_ntk = jj_nt.kind);
-  }
-
-  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>();
-  private int[] jj_expentry;
-  private int jj_kind = -1;
-  private int[] jj_lasttokens = new int[100];
-  private int jj_endpos;
-
-  private void jj_add_error_token(int kind, int pos) {
-    if (pos >= 100) return;
-    if (pos == jj_endpos + 1) {
-      jj_lasttokens[jj_endpos++] = kind;
-    } else if (jj_endpos != 0) {
-      jj_expentry = new int[jj_endpos];
-      for (int i = 0; i < jj_endpos; i++) {
-        jj_expentry[i] = jj_lasttokens[i];
-      }
-      jj_entries_loop: for (java.util.Iterator it = jj_expentries.iterator(); it.hasNext();) {
-        int[] oldentry = (int[])(it.next());
-        if (oldentry.length == jj_expentry.length) {
-          for (int i = 0; i < jj_expentry.length; i++) {
-            if (oldentry[i] != jj_expentry[i]) {
-              continue jj_entries_loop;
-            }
-          }
-          jj_expentries.add(jj_expentry);
-          break jj_entries_loop;
-        }
-      }
-      if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
-    }
-  }
-
-  /** Generate ParseException. */
-  public ParseException generateParseException() {
-    jj_expentries.clear();
-    boolean[] la1tokens = new boolean[32];
-    if (jj_kind >= 0) {
-      la1tokens[jj_kind] = true;
-      jj_kind = -1;
-    }
-    for (int i = 0; i < 24; i++) {
-      if (jj_la1[i] == jj_gen) {
-        for (int j = 0; j < 32; j++) {
-          if ((jj_la1_0[i] & (1<<j)) != 0) {
-            la1tokens[j] = true;
-          }
-        }
-      }
-    }
-    for (int i = 0; i < 32; i++) {
-      if (la1tokens[i]) {
-        jj_expentry = new int[1];
-        jj_expentry[0] = i;
-        jj_expentries.add(jj_expentry);
-      }
-    }
-    jj_endpos = 0;
-    jj_rescan_token();
-    jj_add_error_token(0, 0);
-    int[][] exptokseq = new int[jj_expentries.size()][];
-    for (int i = 0; i < jj_expentries.size(); i++) {
-      exptokseq[i] = jj_expentries.get(i);
-    }
-    return new ParseException(token, exptokseq, tokenImage);
-  }
-
-  /** Enable tracing. */
-  final public void enable_tracing() {
-  }
-
-  /** Disable tracing. */
-  final public void disable_tracing() {
-  }
-
-  private void jj_rescan_token() {
-    jj_rescan = true;
-    for (int i = 0; i < 1; i++) {
-    try {
-      JJCalls p = jj_2_rtns[i];
-      do {
-        if (p.gen > jj_gen) {
-          jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
-          switch (i) {
-            case 0: jj_3_1(); break;
-          }
-        }
-        p = p.next;
-      } while (p != null);
-      } catch(LookaheadSuccess ls) { }
-    }
-    jj_rescan = false;
-  }
-
-  private void jj_save(int index, int xla) {
-    JJCalls p = jj_2_rtns[index];
-    while (p.gen > jj_gen) {
-      if (p.next == null) { p = p.next = new JJCalls(); break; }
-      p = p.next;
-    }
-    p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla;
-  }
-
-  static final class JJCalls {
-    int gen;
-    Token first;
-    int arg;
-    JJCalls next;
+  public PrecedenceQueryParser(Analyzer analyzer) {
+    super(analyzer);
+    
+    setQueryNodeProcessor(new PrecedenceQueryNodeProcessorPipeline(getQueryConfigHandler()));
+    
   }
 
 }
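
For context, a minimal usage sketch of the reworked parser follows. This is a hypothetical demo
class that assumes the StandardQueryParser API exercised in TestPrecedenceQueryParser below
(setAnalyzer(Analyzer) and parse(String query, String defaultField)); the SimpleAnalyzer and
Version.LUCENE_30 choices are only illustrative.

import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.queryParser.precedence.PrecedenceQueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class PrecedenceQueryParserDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical sketch: PrecedenceQueryParser inherits the StandardQueryParser API,
    // so the analyzer is set explicitly and the default field is passed to parse().
    PrecedenceQueryParser parser = new PrecedenceQueryParser();
    parser.setAnalyzer(new SimpleAnalyzer(Version.LUCENE_30)); // analyzer/version chosen for illustration
    Query q = parser.parse("a AND b OR c AND d", "field");
    // Boolean precedence is respected: prints (+a +b) (+c +d) rather than +a +b +c +d.
    System.out.println(q.toString("field"));
  }
}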

Modified: lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/package.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/package.html?rev=1025606&r1=1025605&r2=1025606&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/package.html (original)
+++ lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/package.html Wed Oct 20 14:51:12 2010
@@ -16,7 +16,24 @@
  limitations under the License.
 -->
 <html>
+<head>
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+</head>
 <body>
-QueryParser designed to handle operator precedence in a more sensible fashion than the default QueryParser.
+
+This package contains the Precedence Query Parser implementation.
+
+<h2>Lucene Precedence Query Parser</h2>
+
+<p>
+The Precedence Query Parser extends the Standard Query Parser and enables
+boolean operator precedence, so the query &lt;a AND b OR c AND d&gt; is parsed to
+&lt;(+a +b) (+c +d)&gt; instead of &lt;+a +b +c +d&gt;.
+</p>
+<p>
+See {@link org.apache.lucene.queryParser.standard.StandardQueryParser} for more details about the
+supported syntax and query parser functionality.
+</p>
+
 </body>
 </html>

Modified: lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java?rev=1025606&r1=1025605&r2=1025606&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (original)
+++ lucene/dev/branches/branch_3x/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java Wed Oct 20 14:51:12 2010
@@ -17,63 +17,86 @@ package org.apache.lucene.queryParser.pr
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.io.Reader;
+import java.text.DateFormat;
+import java.util.Calendar;
+import java.util.Collections;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseTokenizer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.DateField;
 import org.apache.lucene.document.DateTools;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.queryParser.TestQueryParser;
+import org.apache.lucene.queryParser.core.QueryNodeException;
+import org.apache.lucene.queryParser.core.QueryNodeParseException;
+import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator;
+import org.apache.lucene.queryParser.standard.parser.ParseException;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermRangeQuery;
+
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.WildcardQuery;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.io.IOException;
-import java.io.Reader;
-import java.text.DateFormat;
-import java.util.Calendar;
-import java.util.GregorianCalendar;
-import java.util.Collections;
-
+/**
+ * <p>
+ * This test case tests {@link PrecedenceQueryParser}.
+ * </p>
+ * <p>
+ * It contains all tests from {@link TestQueryParser} with some adjusted to 
+ * fit the precedence requirement, plus some precedence test cases. 
+ * </p>
+ * 
+ * @see TestQueryParser
+ */
 public class TestPrecedenceQueryParser extends LuceneTestCase {
+
   public static Analyzer qpAnalyzer = new QPTestAnalyzer();
 
   public static final class QPTestFilter extends TokenFilter {
     /**
-     * Filter which discards the token 'stop' and which expands the
-     * token 'phrase' into 'phrase1 phrase2'
+     * Filter which discards the token 'stop' and which expands the token
+     * 'phrase' into 'phrase1 phrase2'
      */
     public QPTestFilter(TokenStream in) {
       super(in);
     }
 
     boolean inPhrase = false;
+
     int savedStart = 0, savedEnd = 0;
 
     CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+
     OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
-    
-    @Override
+
     public boolean incrementToken() throws IOException {
-      clearAttributes();
       if (inPhrase) {
         inPhrase = false;
         termAtt.setEmpty().append("phrase2");
         offsetAtt.setOffset(savedStart, savedEnd);
         return true;
       } else
-        while(input.incrementToken())
+        while (input.incrementToken())
           if (termAtt.toString().equals("phrase")) {
             inPhrase = true;
             savedStart = offsetAtt.startOffset();
@@ -90,31 +113,13 @@ public class TestPrecedenceQueryParser e
   public static final class QPTestAnalyzer extends Analyzer {
 
     /** Filters LowerCaseTokenizer with StopFilter. */
-    @Override
     public final TokenStream tokenStream(String fieldName, Reader reader) {
       return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
     }
   }
 
-  public static class QPTestParser extends PrecedenceQueryParser {
-    public QPTestParser(String f, Analyzer a) {
-      super(f, a);
-    }
-
-    @Override
-    protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
-      throw new ParseException("Fuzzy queries not allowed");
-    }
-
-    @Override
-    protected Query getWildcardQuery(String field, String termStr) throws ParseException {
-      throw new ParseException("Wildcard queries not allowed");
-    }
-  }
-
   private int originalMaxClauses;
 
-  @Override
   public void setUp() throws Exception {
     super.setUp();
     originalMaxClauses = BooleanQuery.getMaxClauseCount();
@@ -123,40 +128,42 @@ public class TestPrecedenceQueryParser e
   public PrecedenceQueryParser getParser(Analyzer a) throws Exception {
     if (a == null)
       a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
-    PrecedenceQueryParser qp = new PrecedenceQueryParser("field", a);
-    qp.setDefaultOperator(PrecedenceQueryParser.OR_OPERATOR);
+    PrecedenceQueryParser qp = new PrecedenceQueryParser();
+    qp.setAnalyzer(a);
+    qp.setDefaultOperator(Operator.OR);
     return qp;
   }
 
   public Query getQuery(String query, Analyzer a) throws Exception {
-    return getParser(a).parse(query);
+    return getParser(a).parse(query, "field");
   }
 
   public void assertQueryEquals(String query, Analyzer a, String result)
-    throws Exception {
+      throws Exception {
     Query q = getQuery(query, a);
     String s = q.toString("field");
     if (!s.equals(result)) {
-      fail("Query /" + query + "/ yielded /" + s
-           + "/, expecting /" + result + "/");
+      fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result
+          + "/");
     }
   }
 
-  public void assertWildcardQueryEquals(String query, boolean lowercase, String result)
-    throws Exception {
+  public void assertWildcardQueryEquals(String query, boolean lowercase,
+      String result) throws Exception {
     PrecedenceQueryParser qp = getParser(null);
     qp.setLowercaseExpandedTerms(lowercase);
-    Query q = qp.parse(query);
+    Query q = qp.parse(query, "field");
     String s = q.toString("field");
     if (!s.equals(result)) {
-      fail("WildcardQuery /" + query + "/ yielded /" + s
-           + "/, expecting /" + result + "/");
+      fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /"
+          + result + "/");
     }
   }
 
-  public void assertWildcardQueryEquals(String query, String result) throws Exception {
+  public void assertWildcardQueryEquals(String query, String result)
+      throws Exception {
     PrecedenceQueryParser qp = getParser(null);
-    Query q = qp.parse(query);
+    Query q = qp.parse(query, "field");
     String s = q.toString("field");
     if (!s.equals(result)) {
       fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /"
@@ -164,36 +171,30 @@ public class TestPrecedenceQueryParser e
     }
   }
 
-  public Query getQueryDOA(String query, Analyzer a)
-    throws Exception {
+  public Query getQueryDOA(String query, Analyzer a) throws Exception {
     if (a == null)
       a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
-    PrecedenceQueryParser qp = new PrecedenceQueryParser("field", a);
-    qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
-    return qp.parse(query);
+    PrecedenceQueryParser qp = new PrecedenceQueryParser();
+    qp.setAnalyzer(a);
+    qp.setDefaultOperator(Operator.AND);
+    return qp.parse(query, "field");
   }
 
   public void assertQueryEqualsDOA(String query, Analyzer a, String result)
-    throws Exception {
+      throws Exception {
     Query q = getQueryDOA(query, a);
     String s = q.toString("field");
     if (!s.equals(result)) {
-      fail("Query /" + query + "/ yielded /" + s
-           + "/, expecting /" + result + "/");
+      fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result
+          + "/");
     }
   }
 
-  // failing tests disabled since PrecedenceQueryParser
-  // is currently unmaintained
-  public void _testSimple() throws Exception {
-    assertQueryEquals("", null, "");
-
+  public void testSimple() throws Exception {
     assertQueryEquals("term term term", null, "term term term");
     assertQueryEquals("türm term term", null, "türm term term");
     assertQueryEquals("ümlaut", null, "ümlaut");
 
-    assertQueryEquals("+a", null, "+a");
-    assertQueryEquals("-a", null, "-a");
     assertQueryEquals("a AND b", null, "+a +b");
     assertQueryEquals("(a AND b)", null, "+a +b");
     assertQueryEquals("c OR (a AND b)", null, "c (+a +b)");
@@ -208,9 +209,9 @@ public class TestPrecedenceQueryParser e
 
     assertQueryEquals("+term -term term", null, "+term -term term");
     assertQueryEquals("foo:term AND field:anotherTerm", null,
-                      "+foo:term +anotherterm");
+        "+foo:term +anotherterm");
     assertQueryEquals("term AND \"phrase phrase\"", null,
-                      "+term +\"phrase phrase\"");
+        "+term +\"phrase phrase\"");
     assertQueryEquals("\"hello there\"", null, "\"hello there\"");
     assertTrue(getQuery("a AND b", null) instanceof BooleanQuery);
     assertTrue(getQuery("hello", null) instanceof TermQuery);
@@ -225,25 +226,25 @@ public class TestPrecedenceQueryParser e
     assertQueryEquals("\"term germ\"^2", null, "\"term germ\"^2.0");
 
     assertQueryEquals("(foo OR bar) AND (baz OR boo)", null,
-                      "+(foo bar) +(baz boo)");
-    assertQueryEquals("((a OR b) AND NOT c) OR d", null,
-                      "(+(a b) -c) d");
+        "+(foo bar) +(baz boo)");
+    assertQueryEquals("((a OR b) AND NOT c) OR d", null, "(+(a b) -c) d");
     assertQueryEquals("+(apple \"steve jobs\") -(foo bar baz)", null,
-                      "+(apple \"steve jobs\") -(foo bar baz)");
+        "+(apple \"steve jobs\") -(foo bar baz)");
     assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
-                      "+(title:dog title:cat) -author:\"bob dole\"");
-    
-    PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer(TEST_VERSION_CURRENT));
+        "+(title:dog title:cat) -author:\"bob dole\"");
+
+    PrecedenceQueryParser qp = new PrecedenceQueryParser();
+    qp.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
     // make sure OR is the default:
-    assertEquals(PrecedenceQueryParser.OR_OPERATOR, qp.getDefaultOperator());
-    qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
-    assertEquals(PrecedenceQueryParser.AND_OPERATOR, qp.getDefaultOperator());
-    qp.setDefaultOperator(PrecedenceQueryParser.OR_OPERATOR);
-    assertEquals(PrecedenceQueryParser.OR_OPERATOR, qp.getDefaultOperator());
-
-    assertQueryEquals("a OR !b", null, "a (-b)");
-    assertQueryEquals("a OR ! b", null, "a (-b)");
-    assertQueryEquals("a OR -b", null, "a (-b)");
+    assertEquals(Operator.OR, qp.getDefaultOperator());
+    qp.setDefaultOperator(Operator.AND);
+    assertEquals(Operator.AND, qp.getDefaultOperator());
+    qp.setDefaultOperator(Operator.OR);
+    assertEquals(Operator.OR, qp.getDefaultOperator());
+
+    assertQueryEquals("a OR !b", null, "a -b");
+    assertQueryEquals("a OR ! b", null, "a -b");
+    assertQueryEquals("a OR -b", null, "a -b");
   }
 
   public void testPunct() throws Exception {
@@ -262,7 +263,7 @@ public class TestPrecedenceQueryParser e
   }
 
   public void testNumber() throws Exception {
-// The numbers go away because SimpleAnalzyer ignores them
+    // The numbers go away because SimpleAnalyzer ignores them
     assertQueryEquals("3", null, "");
     assertQueryEquals("term 1.0 1 2", null, "term");
     assertQueryEquals("term term1 term2", null, "term term term");
@@ -272,74 +273,14 @@ public class TestPrecedenceQueryParser e
     assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
     assertQueryEquals("term term1 term2", a, "term term1 term2");
   }
-  
-  public void testCJKTerm() throws Exception {
-    // individual CJK chars as terms
-    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
-    
-    BooleanQuery expected = new BooleanQuery();
-    expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
-    expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
-    
-    assertEquals(expected, getQuery("中国", analyzer));
-  }
-  
-  public void testCJKBoostedTerm() throws Exception {
-    // individual CJK chars as terms
-    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
-    
-    BooleanQuery expected = new BooleanQuery();
-    expected.setBoost(0.5f);
-    expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
-    expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
-    
-    assertEquals(expected, getQuery("中国^0.5", analyzer));
-  }
-  
-  public void testCJKPhrase() throws Exception {
-    // individual CJK chars as terms
-    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
-    
-    PhraseQuery expected = new PhraseQuery();
-    expected.add(new Term("field", "中"));
-    expected.add(new Term("field", "国"));
-    
-    assertEquals(expected, getQuery("\"中国\"", analyzer));
-  }
-  
-  public void testCJKBoostedPhrase() throws Exception {
-    // individual CJK chars as terms
-    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
-    
-    PhraseQuery expected = new PhraseQuery();
-    expected.setBoost(0.5f);
-    expected.add(new Term("field", "中"));
-    expected.add(new Term("field", "国"));
-    
-    assertEquals(expected, getQuery("\"中国\"^0.5", analyzer));
-  }
-  
-  public void testCJKSloppyPhrase() throws Exception {
-    // individual CJK chars as terms
-    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
-    
-    PhraseQuery expected = new PhraseQuery();
-    expected.setSlop(3);
-    expected.add(new Term("field", "中"));
-    expected.add(new Term("field", "国"));
-    
-    assertEquals(expected, getQuery("\"中国\"~3", analyzer));
-  }
 
-  // failing tests disabled since PrecedenceQueryParser
-  // is currently unmaintained
-  public void _testWildcard() throws Exception {
+  public void testWildcard() throws Exception {
     assertQueryEquals("term*", null, "term*");
     assertQueryEquals("term*^2", null, "term*^2.0");
     assertQueryEquals("term~", null, "term~0.5");
     assertQueryEquals("term~0.7", null, "term~0.7");
-    assertQueryEquals("term~^2", null, "term^2.0~0.5");
-    assertQueryEquals("term^2~", null, "term^2.0~0.5");
+    assertQueryEquals("term~^3", null, "term~0.5^3.0");
+    assertQueryEquals("term^3~", null, "term~0.5^3.0");
     assertQueryEquals("term*germ", null, "term*germ");
     assertQueryEquals("term*germ^3", null, "term*germ^3.0");
 
@@ -347,24 +288,25 @@ public class TestPrecedenceQueryParser e
     assertTrue(getQuery("term*^2", null) instanceof PrefixQuery);
     assertTrue(getQuery("term~", null) instanceof FuzzyQuery);
     assertTrue(getQuery("term~0.7", null) instanceof FuzzyQuery);
-    FuzzyQuery fq = (FuzzyQuery)getQuery("term~0.7", null);
+    FuzzyQuery fq = (FuzzyQuery) getQuery("term~0.7", null);
     assertEquals(0.7f, fq.getMinSimilarity(), 0.1f);
     assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength());
-    fq = (FuzzyQuery)getQuery("term~", null);
+    fq = (FuzzyQuery) getQuery("term~", null);
     assertEquals(0.5f, fq.getMinSimilarity(), 0.1f);
     assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength());
     try {
-      getQuery("term~1.1", null);   // value > 1, throws exception
+      getQuery("term~1.1", null); // value > 1, throws exception
       fail();
-    } catch(ParseException pe) {
+    } catch (ParseException pe) {
       // expected exception
     }
     assertTrue(getQuery("term*germ", null) instanceof WildcardQuery);
 
-/* Tests to see that wild card terms are (or are not) properly
-	 * lower-cased with propery parser configuration
-	 */
-// First prefix queries:
+    /*
+     * Tests to see that wild card terms are (or are not) properly lower-cased
+     * with proper parser configuration
+     */
+    // First prefix queries:
     // by default, convert to lowercase:
     assertWildcardQueryEquals("Term*", true, "term*");
     // explicitly set lowercase:
@@ -375,7 +317,7 @@ public class TestPrecedenceQueryParser e
     assertWildcardQueryEquals("term*", false, "term*");
     assertWildcardQueryEquals("Term*", false, "Term*");
     assertWildcardQueryEquals("TERM*", false, "TERM*");
-// Then 'full' wildcard queries:
+    // Then 'full' wildcard queries:
     // by default, convert to lowercase:
     assertWildcardQueryEquals("Te?m", "te?m");
     // explicitly set lowercase:
@@ -388,11 +330,11 @@ public class TestPrecedenceQueryParser e
     assertWildcardQueryEquals("Te?m", false, "Te?m");
     assertWildcardQueryEquals("TE?M", false, "TE?M");
     assertWildcardQueryEquals("Te?m*gerM", false, "Te?m*gerM");
-//  Fuzzy queries:
+    // Fuzzy queries:
     assertWildcardQueryEquals("Term~", "term~0.5");
     assertWildcardQueryEquals("Term~", true, "term~0.5");
     assertWildcardQueryEquals("Term~", false, "Term~0.5");
-//  Range queries:
+    // Range queries:
     assertWildcardQueryEquals("[A TO C]", "[a TO c]");
     assertWildcardQueryEquals("[A TO C]", true, "[a TO c]");
     assertWildcardQueryEquals("[A TO C]", false, "[A TO C]");
@@ -404,11 +346,11 @@ public class TestPrecedenceQueryParser e
     assertQueryEquals("term -stop term", qpAnalyzer, "term term");
     assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll");
     assertQueryEquals("term phrase term", qpAnalyzer,
-                      "term (phrase1 phrase2) term");
+        "term (phrase1 phrase2) term");
     // note the parens in this next assertion differ from the original
     // QueryParser behavior
     assertQueryEquals("term AND NOT phrase term", qpAnalyzer,
-                      "(+term -(phrase1 phrase2)) term");
+        "(+term -(phrase1 phrase2)) term");
     assertQueryEquals("stop", qpAnalyzer, "");
     assertQueryEquals("stop OR stop AND stop", qpAnalyzer, "");
     assertTrue(getQuery("term term term", qpAnalyzer) instanceof BooleanQuery);
@@ -425,9 +367,10 @@ public class TestPrecedenceQueryParser e
     assertQueryEquals("[ a TO z] OR bar", null, "[a TO z] bar");
     assertQueryEquals("[ a TO z] AND bar", null, "+[a TO z] +bar");
     assertQueryEquals("( bar blar { a TO z}) ", null, "bar blar {a TO z}");
-    assertQueryEquals("gack ( bar blar { a TO z}) ", null, "gack (bar blar {a TO z})");
+    assertQueryEquals("gack ( bar blar { a TO z}) ", null,
+        "gack (bar blar {a TO z})");
   }
-  
+
   private String escapeDateString(String s) {
     if (s.contains(" ")) {
       return "\"" + s + "\"";
@@ -441,53 +384,106 @@ public class TestPrecedenceQueryParser e
     return DateTools.dateToString(df.parse(s), DateTools.Resolution.DAY);
   }
 
-  public String getLocalizedDate(int year, int month, int day) {
+  private String getLocalizedDate(int year, int month, int day,
+      boolean extendLastDate) {
     DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
     Calendar calendar = new GregorianCalendar();
-    calendar.clear();
     calendar.set(year, month, day);
-    calendar.set(Calendar.HOUR_OF_DAY, 23);
-    calendar.set(Calendar.MINUTE, 59);
-    calendar.set(Calendar.SECOND, 59);
-    calendar.set(Calendar.MILLISECOND, 999);
+    if (extendLastDate) {
+      calendar.set(Calendar.HOUR_OF_DAY, 23);
+      calendar.set(Calendar.MINUTE, 59);
+      calendar.set(Calendar.SECOND, 59);
+      calendar.set(Calendar.MILLISECOND, 999);
+    }
     return df.format(calendar.getTime());
   }
 
   public void testDateRange() throws Exception {
-    String startDate = getLocalizedDate(2002, 1, 1);
-    String endDate = getLocalizedDate(2002, 1, 4);
-    assertQueryEquals("[ " + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]", null,
-                      "[" + getDate(startDate) + " TO " + getDate(endDate) + "]");
-    assertQueryEquals("{  " + escapeDateString(startDate) + "    " + escapeDateString(endDate) + "   }", null,
-                      "{" + getDate(startDate) + " TO " + getDate(endDate) + "}");
+    String startDate = getLocalizedDate(2002, 1, 1, false);
+    String endDate = getLocalizedDate(2002, 1, 4, false);
+    Calendar endDateExpected = new GregorianCalendar();
+    endDateExpected.set(2002, 1, 4, 23, 59, 59);
+    endDateExpected.set(Calendar.MILLISECOND, 999);
+    final String defaultField = "default";
+    final String monthField = "month";
+    final String hourField = "hour";
+    PrecedenceQueryParser qp = new PrecedenceQueryParser(new StandardAnalyzer(TEST_VERSION_CURRENT));
+
+    // Don't set any date resolution and verify if DateField is used
+    assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
+        endDateExpected.getTime(), null);
+
+    Map<CharSequence, DateTools.Resolution> fieldMap = new HashMap<CharSequence,DateTools.Resolution>();
+    // set a field specific date resolution
+    fieldMap.put(monthField, DateTools.Resolution.MONTH);
+    qp.setDateResolution(fieldMap);
+
+    // DateField should still be used for defaultField
+    assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
+        endDateExpected.getTime(), null);
+
+    // set default date resolution to MILLISECOND
+    qp.setDateResolution(DateTools.Resolution.MILLISECOND);
+
+    // set second field specific date resolution
+    fieldMap.put(hourField, DateTools.Resolution.HOUR);
+    qp.setDateResolution(fieldMap);
+
+    // for this field no field specific date resolution has been set,
+    // so verify if the default resolution is used
+    assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
+        endDateExpected.getTime(), DateTools.Resolution.MILLISECOND);
+
+    // verify if field specific date resolutions are used for these two fields
+    assertDateRangeQueryEquals(qp, monthField, startDate, endDate,
+        endDateExpected.getTime(), DateTools.Resolution.MONTH);
+
+    assertDateRangeQueryEquals(qp, hourField, startDate, endDate,
+        endDateExpected.getTime(), DateTools.Resolution.HOUR);
+  }
+
+  /** for testing DateTools support */
+  private String getDate(String s, DateTools.Resolution resolution)
+      throws Exception {
+    DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
+    return getDate(df.parse(s), resolution);
+  }
+
+  /** for testing DateTools support */
+  private String getDate(Date d, DateTools.Resolution resolution)
+      throws Exception {
+    if (resolution == null) {
+      return DateField.dateToString(d);
+    } else {
+      return DateTools.dateToString(d, resolution);
+    }
+  }
+
+  public void assertQueryEquals(PrecedenceQueryParser qp, String field, String query,
+      String result) throws Exception {
+    Query q = qp.parse(query, field);
+    String s = q.toString(field);
+    if (!s.equals(result)) {
+      fail("Query /" + query + "/ yielded /" + s + "/, expecting /" + result
+          + "/");
+    }
+  }
+
+  public void assertDateRangeQueryEquals(PrecedenceQueryParser qp, String field,
+      String startDate, String endDate, Date endDateInclusive,
+      DateTools.Resolution resolution) throws Exception {
+    assertQueryEquals(qp, field, field + ":[" + escapeDateString(startDate)
+        + " TO " + escapeDateString(endDate) + "]", "["
+        + getDate(startDate, resolution) + " TO "
+        + getDate(endDateInclusive, resolution) + "]");
+    assertQueryEquals(qp, field, field + ":{" + escapeDateString(startDate)
+        + " TO " + escapeDateString(endDate) + "}", "{"
+        + getDate(startDate, resolution) + " TO "
+        + getDate(endDate, resolution) + "}");
   }
 
   public void testEscaped() throws Exception {
     Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
-    
-    /*assertQueryEquals("\\[brackets", a, "\\[brackets");
-    assertQueryEquals("\\[brackets", null, "brackets");
-    assertQueryEquals("\\\\", a, "\\\\");
-    assertQueryEquals("\\+blah", a, "\\+blah");
-    assertQueryEquals("\\(blah", a, "\\(blah");
-
-    assertQueryEquals("\\-blah", a, "\\-blah");
-    assertQueryEquals("\\!blah", a, "\\!blah");
-    assertQueryEquals("\\{blah", a, "\\{blah");
-    assertQueryEquals("\\}blah", a, "\\}blah");
-    assertQueryEquals("\\:blah", a, "\\:blah");
-    assertQueryEquals("\\^blah", a, "\\^blah");
-    assertQueryEquals("\\[blah", a, "\\[blah");
-    assertQueryEquals("\\]blah", a, "\\]blah");
-    assertQueryEquals("\\\"blah", a, "\\\"blah");
-    assertQueryEquals("\\(blah", a, "\\(blah");
-    assertQueryEquals("\\)blah", a, "\\)blah");
-    assertQueryEquals("\\~blah", a, "\\~blah");
-    assertQueryEquals("\\*blah", a, "\\*blah");
-    assertQueryEquals("\\?blah", a, "\\?blah");
-    //assertQueryEquals("foo \\&\\& bar", a, "foo \\&\\& bar");
-    //assertQueryEquals("foo \\|| bar", a, "foo \\|| bar");
-    //assertQueryEquals("foo \\AND bar", a, "foo \\AND bar");*/
 
     assertQueryEquals("a\\-b:c", a, "a-b:c");
     assertQueryEquals("a\\+b:c", a, "a+b:c");
@@ -521,44 +517,29 @@ public class TestPrecedenceQueryParser e
     assertQueryEquals("[ a\\\\ TO a\\* ]", null, "[a\\ TO a*]");
   }
 
-  public void testTabNewlineCarriageReturn()
-    throws Exception {
-    assertQueryEqualsDOA("+weltbank +worlbank", null,
-      "+weltbank +worlbank");
-
-    assertQueryEqualsDOA("+weltbank\n+worlbank", null,
-      "+weltbank +worlbank");
-    assertQueryEqualsDOA("weltbank \n+worlbank", null,
-      "+weltbank +worlbank");
-    assertQueryEqualsDOA("weltbank \n +worlbank", null,
-      "+weltbank +worlbank");
-
-    assertQueryEqualsDOA("+weltbank\r+worlbank", null,
-      "+weltbank +worlbank");
-    assertQueryEqualsDOA("weltbank \r+worlbank", null,
-      "+weltbank +worlbank");
-    assertQueryEqualsDOA("weltbank \r +worlbank", null,
-      "+weltbank +worlbank");
-
-    assertQueryEqualsDOA("+weltbank\r\n+worlbank", null,
-      "+weltbank +worlbank");
-    assertQueryEqualsDOA("weltbank \r\n+worlbank", null,
-      "+weltbank +worlbank");
-    assertQueryEqualsDOA("weltbank \r\n +worlbank", null,
-      "+weltbank +worlbank");
+  public void testTabNewlineCarriageReturn() throws Exception {
+    assertQueryEqualsDOA("+weltbank +worlbank", null, "+weltbank +worlbank");
+
+    assertQueryEqualsDOA("+weltbank\n+worlbank", null, "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \n+worlbank", null, "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \n +worlbank", null, "+weltbank +worlbank");
+
+    assertQueryEqualsDOA("+weltbank\r+worlbank", null, "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r+worlbank", null, "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r +worlbank", null, "+weltbank +worlbank");
+
+    assertQueryEqualsDOA("+weltbank\r\n+worlbank", null, "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r\n+worlbank", null, "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \r\n +worlbank", null, "+weltbank +worlbank");
     assertQueryEqualsDOA("weltbank \r \n +worlbank", null,
-      "+weltbank +worlbank");
+        "+weltbank +worlbank");
 
-    assertQueryEqualsDOA("+weltbank\t+worlbank", null,
-      "+weltbank +worlbank");
-    assertQueryEqualsDOA("weltbank \t+worlbank", null,
-      "+weltbank +worlbank");
-    assertQueryEqualsDOA("weltbank \t +worlbank", null,
-      "+weltbank +worlbank");
+    assertQueryEqualsDOA("+weltbank\t+worlbank", null, "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \t+worlbank", null, "+weltbank +worlbank");
+    assertQueryEqualsDOA("weltbank \t +worlbank", null, "+weltbank +worlbank");
   }
 
-  public void testSimpleDAO()
-    throws Exception {
+  public void testSimpleDAO() throws Exception {
     assertQueryEqualsDOA("term term term", null, "+term +term +term");
     assertQueryEqualsDOA("term +term term", null, "+term +term +term");
     assertQueryEqualsDOA("term term +term", null, "+term +term +term");
@@ -566,22 +547,23 @@ public class TestPrecedenceQueryParser e
     assertQueryEqualsDOA("-term term term", null, "-term +term +term");
   }
 
-  public void testBoost()
-    throws Exception {
+  public void testBoost() throws Exception {
     StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.singleton("on"));
-    PrecedenceQueryParser qp = new PrecedenceQueryParser("field", oneStopAnalyzer);
-    Query q = qp.parse("on^1.0");
+
+    PrecedenceQueryParser qp = new PrecedenceQueryParser();
+    qp.setAnalyzer(oneStopAnalyzer);
+    Query q = qp.parse("on^1.0", "field");
     assertNotNull(q);
-    q = qp.parse("\"hello\"^2.0");
+    q = qp.parse("\"hello\"^2.0", "field");
     assertNotNull(q);
     assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
-    q = qp.parse("hello^2.0");
+    q = qp.parse("hello^2.0", "field");
     assertNotNull(q);
     assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
-    q = qp.parse("\"on\"^1.0");
+    q = qp.parse("\"on\"^1.0", "field");
     assertNotNull(q);
 
-    q = getParser(new StandardAnalyzer(TEST_VERSION_CURRENT)).parse("the^3");
+    q = getParser(new StandardAnalyzer(TEST_VERSION_CURRENT)).parse("the^3", "field");
     assertNotNull(q);
   }
 
@@ -589,76 +571,75 @@ public class TestPrecedenceQueryParser e
     try {
       assertQueryEquals("\"some phrase", null, "abc");
       fail("ParseException expected, not thrown");
-    } catch (ParseException expected) {
-    }
-  }
-
-  public void testCustomQueryParserWildcard() {
-    try {
-      new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t");
-    } catch (ParseException expected) {
-      return;
+    } catch (QueryNodeParseException expected) {
     }
-    fail("Wildcard queries should not be allowed");
-  }
-
-  public void testCustomQueryParserFuzzy() throws Exception {
-    try {
-      new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~");
-    } catch (ParseException expected) {
-      return;
-    }
-    fail("Fuzzy queries should not be allowed");
   }
 
   public void testBooleanQuery() throws Exception {
     BooleanQuery.setMaxClauseCount(2);
     try {
-      getParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("one two three");
+      getParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("one two three", "field");
       fail("ParseException expected due to too many boolean clauses");
-    } catch (ParseException expected) {
+    } catch (QueryNodeException expected) {
       // too many boolean clauses, so ParseException is expected
     }
   }
 
   /**
-   * This test differs from the original QueryParser, showing how the
-   * precedence issue has been corrected.
+   * This test differs from the original QueryParser, showing how the precedence
+   * issue has been corrected.
    */
-  // failing tests disabled since PrecedenceQueryParser
-  // is currently unmaintained
-  public void _testPrecedence() throws Exception {
+  public void testPrecedence() throws Exception {
     PrecedenceQueryParser parser = getParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
-    Query query1 = parser.parse("A AND B OR C AND D");
-    Query query2 = parser.parse("(A AND B) OR (C AND D)");
+    Query query1 = parser.parse("A AND B OR C AND D", "field");
+    Query query2 = parser.parse("(A AND B) OR (C AND D)", "field");
     assertEquals(query1, query2);
 
-    query1 = parser.parse("A OR B C");
-    query2 = parser.parse("A B C");
+    query1 = parser.parse("A OR B C", "field");
+    query2 = parser.parse("(A B) C", "field");
     assertEquals(query1, query2);
 
-    query1 = parser.parse("A AND B C");
-    query2 = parser.parse("(+A +B) C");
+    query1 = parser.parse("A AND B C", "field");
+    query2 = parser.parse("(+A +B) C", "field");
     assertEquals(query1, query2);
 
-    query1 = parser.parse("A AND NOT B");
-    query2 = parser.parse("+A -B");
+    query1 = parser.parse("A AND NOT B", "field");
+    query2 = parser.parse("+A -B", "field");
     assertEquals(query1, query2);
 
-    query1 = parser.parse("A OR NOT B");
-    query2 = parser.parse("A -B");
+    query1 = parser.parse("A OR NOT B", "field");
+    query2 = parser.parse("A -B", "field");
     assertEquals(query1, query2);
 
-    query1 = parser.parse("A OR NOT B AND C");
-    query2 = parser.parse("A (-B +C)");
+    query1 = parser.parse("A OR NOT B AND C", "field");
+    query2 = parser.parse("A (-B +C)", "field");
+    assertEquals(query1, query2);
+    
+    parser.setDefaultOperator(Operator.AND);
+    query1 = parser.parse("A AND B OR C AND D", "field");
+    query2 = parser.parse("(A AND B) OR (C AND D)", "field");
+    assertEquals(query1, query2);
+
+    query1 = parser.parse("A AND B C", "field");
+    query2 = parser.parse("(A B) C", "field");
+    assertEquals(query1, query2);
+
+    query1 = parser.parse("A AND B C", "field");
+    query2 = parser.parse("(+A +B) C", "field");
     assertEquals(query1, query2);
-  }
 
+    query1 = parser.parse("A AND NOT B", "field");
+    query2 = parser.parse("+A -B", "field");
+    assertEquals(query1, query2);
+
+    query1 = parser.parse("A AND NOT B OR C", "field");
+    query2 = parser.parse("(+A -B) OR C", "field");
+    assertEquals(query1, query2);
+    
+  }
 
-  @Override
-  public void tearDown() throws Exception {
+  public void tearDown() {
     BooleanQuery.setMaxClauseCount(originalMaxClauses);
-    super.tearDown();
   }
 
 }
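
For readers following the migration above: the tests now use the flexible-parser style API (no-arg constructor, setAnalyzer, and parse(query, defaultField)) instead of the old constructor-plus-parse(query) form. A minimal standalone sketch of that usage follows; the class name, field name, analyzer choice and Version constant are illustrative assumptions, not part of this commit.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.precedence.PrecedenceQueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class PrecedenceParserUsage {
  public static void main(String[] args) throws Exception {
    // New-style setup, as exercised by the tests: no-arg constructor plus setAnalyzer.
    PrecedenceQueryParser qp = new PrecedenceQueryParser();
    qp.setAnalyzer(new StandardAnalyzer(Version.LUCENE_30));
    // parse now takes the default field as a second argument; with the corrected
    // operator precedence this query is equivalent to "(a AND b) OR (c AND d)".
    Query q = qp.parse("a AND b OR c AND d", "field");
    System.out.println(q.toString("field"));
  }
}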

Propchange: lucene/dev/branches/branch_3x/lucene/src/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Oct 20 14:51:12 2010
@@ -1,4 +1,4 @@
-/lucene/dev/trunk/lucene/src:931298,931337,931502,932129-932131,932163,932304,932369,932374,932398,932417,932541,932576,932587,932698,932731-932749,932752,932773,932795,932828,932856-932857,932862,932864,932878,932963,932998-932999,933541-933575,933598,933613,933679,933879,934339,934954,935014-935048,935065,935186-935513,935521-935522,935553-935962,936522,936544,936605,936657-936726,937039,937360,938582-938646,938989,939111,939611,939649,940433,940447,940451-940452,940666,940699,940730,940878-940892,940994,941270,941363,942166,942288,942676,942719,943142,943493,943931,945057,945090,945130,945245,945343,945420,946139,946330,946338,946599,948011,948082,948429,949288,949311,949445,949976,949997,950008,950042,950458,950613,951126,951355,951397,951521,953407,955547,955613,955615,955796-955797,955809-955996,956097,956125,956173,956316,956715,957465,957520,957634,957707,960367,960371,960374,960719,962555,963372,963654,963720,963781,963873,963906,963909,963920,964019,964054,964430,9
 64459,964720,964753,964832,964856,965103,965110,965222,965230,965299,965327,965330,965585,966354,966878,967080,979453,979809,980369,980428,980436,980501,980909,980911,980917,981265,981550,981598,981650,981661,981857,981936,982073,982084,982201,982725,982824,983100,983212,983216,983313,983495,983500,983530,983622,983632,983778,984187,984202,984232,984510,984968,985453,985455,985672,985875,986158,986173,986612,987122,988087,988206,988216,988259,988346,988478,988527,988543,988592,988613,988688,988710,988736,988739,989004,989010,989013,989030,989035,989315,989321,989334,989785,990160-990161,990180,990189,990281,990301,990451,990459,990766,990781,990854,991053,991191,991310,991497,992424,992469,992567,992571,992623,993106,993194,993199,993287,993408,994935,994976,994979,995247,995250,995376,995772,996268,996357,996416,996511,996611,996623,996647-996653,996720,996942,996978,997180,997230,998055,998505,998684,999016,999223,999545,999842,1000424,1000581,1000675,1001006,1001420,10016
 61,1001796,1002032,1003614,1003631,1003645,1003841-1003852,1003873,1003877,1003906,1003938,1003954,1003978,1003990,1004038,1004082,1004179,1004200,1004215,1004241,1004335,1005310,1005356,1005363,1006146,1006290,1006324,1021340,1021357,1021360,1021449,1021969-1021971,1022165,1022191,1022632,1022708-1022710,1022730-1022735,1022748-1022755,1022762-1022793,1022798-1022802,1022826,1022927,1022939,1022956,1022989,1022998,1023006,1023009,1023022,1023040,1023106,1023235-1023246,1023250,1023264-1023265,1023312,1023330,1023346,1023355,1023493,1023509-1023511,1023562,1023579-1023588,1023621,1023635,1023637,1023711,1023845,1023870,1024196,1024219,1024256,1024402,1024408,1024476
+/lucene/dev/trunk/lucene/src:931298,931337,931502,932129-932131,932163,932304,932369,932374,932398,932417,932541,932576,932587,932698,932731-932749,932752,932773,932795,932828,932856-932857,932862,932864,932878,932963,932998-932999,933541-933575,933598,933613,933679,933879,934339,934954,935014-935048,935065,935186-935513,935521-935522,935553-935962,936522,936544,936605,936657-936726,937039,937360,938582-938646,938989,939111,939611,939649,940433,940447,940451-940452,940666,940699,940730,940878-940892,940994,941270,941363,942166,942288,942676,942719,943142,943493,943931,945057,945090,945130,945245,945343,945420,946139,946330,946338,946599,948011,948082,948429,949288,949311,949445,949976,949997,950008,950042,950458,950613,951126,951355,951397,951521,953407,955547,955613,955615,955796-955797,955809-955996,956097,956125,956173,956316,956715,957465,957520,957634,957707,960367,960371,960374,960719,962555,963372,963654,963720,963781,963873,963906,963909,963920,964019,964054,964430,9
 64459,964720,964753,964832,964856,965103,965110,965222,965230,965299,965327,965330,965585,966354,966878,967080,979453,979809,980369,980428,980436,980501,980909,980911,980917,981265,981550,981598,981650,981661,981857,981936,982073,982084,982201,982725,982824,983100,983212,983216,983313,983495,983500,983530,983622,983632,983778,984187,984202,984232,984510,984968,985453,985455,985672,985875,986158,986173,986612,987122,988087,988206,988216,988259,988346,988478,988527,988543,988592,988613,988688,988710,988736,988739,989004,989010,989013,989030,989035,989315,989321,989334,989785,990160-990161,990180,990189,990281,990301,990451,990459,990766,990781,990854,991053,991191,991310,991497,992424,992469,992567,992571,992623,993106,993194,993199,993287,993408,994935,994976,994979,995247,995250,995376,995772,996268,996357,996416,996511,996611,996623,996647-996653,996720,996942,996978,997180,997230,998055,998505,998684,999016,999223,999545,999842,1000424,1000581,1000675,1001006,1001420,10016
 61,1001796,1002032,1003614,1003631,1003645,1003841-1003852,1003873,1003877,1003906,1003938,1003954,1003978,1003990,1004038,1004082,1004179,1004200,1004215,1004241,1004335,1005310,1005356,1005363,1006146,1006290,1006324,1021340,1021357,1021360,1021449,1021969-1021971,1022165,1022191,1022632,1022708-1022710,1022730-1022735,1022748-1022755,1022762-1022793,1022798-1022802,1022826,1022927,1022939,1022956,1022989,1022998,1023006,1023009,1023022,1023040,1023106,1023235-1023246,1023250,1023264-1023265,1023312,1023330,1023346,1023355,1023493,1023509-1023511,1023562,1023579-1023588,1023621,1023635,1023637,1023711,1023845,1023870,1024196,1024219,1024256,1024402,1024408,1024476,1025597
 /lucene/dev/trunk/src:932749
 /lucene/java/branches/flex_1458/src:924791,924850,930201
 /lucene/java/branches/lucene_2_4/src:748824