You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rm...@apache.org on 2011/12/13 19:23:23 UTC

svn commit: r1213825 [1/2] - in /lucene/dev/branches/lucene3622: ./ lucene/ lucene/src/java/org/apache/lucene/index/ lucene/src/java/org/apache/lucene/search/ lucene/src/java/org/apache/lucene/util/ lucene/src/test-framework/java/org/apache/lucene/anal...

Author: rmuir
Date: Tue Dec 13 18:23:22 2011
New Revision: 1213825

URL: http://svn.apache.org/viewvc?rev=1213825&view=rev
Log:
LUCENE-3622: merge trunk (1213324:1213812)

Added:
    lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/util/CommandLineUtil.java
      - copied unchanged from r1213812, lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/CommandLineUtil.java
    lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java
      - copied unchanged from r1213812, lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java
Removed:
    lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/search/CachingWrapperFilterHelper.java
Modified:
    lucene/dev/branches/lucene3622/   (props changed)
    lucene/dev/branches/lucene3622/lucene/   (props changed)
    lucene/dev/branches/lucene3622/lucene/CHANGES.txt
    lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/CheckIndex.java
    lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexReader.java
    lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java
    lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java
    lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/FilteredQuery.java
    lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
    lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java
    lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java
    lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
    lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
    lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
    lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
    lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
    lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
    lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
    lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
    lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java
    lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
    lucene/dev/branches/lucene3622/solr/   (props changed)
    lucene/dev/branches/lucene3622/solr/CHANGES.txt   (contents, props changed)
    lucene/dev/branches/lucene3622/solr/core/   (props changed)
    lucene/dev/branches/lucene3622/solr/core/src/java/   (props changed)
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/core/SolrCore.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java
    lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java
    lucene/dev/branches/lucene3622/solr/core/src/test/   (props changed)
    lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java
    lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java
    lucene/dev/branches/lucene3622/solr/solrj/   (props changed)
    lucene/dev/branches/lucene3622/solr/solrj/src/java/   (props changed)
    lucene/dev/branches/lucene3622/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java
    lucene/dev/branches/lucene3622/solr/solrj/src/java/org/apache/solr/common/params/AppendedSolrParams.java
    lucene/dev/branches/lucene3622/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java
    lucene/dev/branches/lucene3622/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java
    lucene/dev/branches/lucene3622/solr/solrj/src/test/org/apache/solr/common/   (props changed)
    lucene/dev/branches/lucene3622/solr/solrj/src/test/org/apache/solr/common/params/SolrParamTest.java

Modified: lucene/dev/branches/lucene3622/lucene/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/CHANGES.txt?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/CHANGES.txt (original)
+++ lucene/dev/branches/lucene3622/lucene/CHANGES.txt Tue Dec 13 18:23:22 2011
@@ -638,6 +638,10 @@ Optimizations
   boolean clauses are required and instances of TermQuery. 
   (Simon Willnauer, Robert Muir)
   
+* LUCENE-3643: FilteredQuery and IndexSearcher.search(Query, Filter,...)
+  now optimize the special case query instanceof MatchAllDocsQuery to
+  execute as ConstantScoreQuery.  (Uwe Schindler)
+  
 Bug fixes
 
 * LUCENE-2803: The FieldCache can miss values if an entry for a reader
@@ -712,6 +716,10 @@ New Features
 * LUCENE-3593: Added a FieldValueFilter that accepts all documents that either
   have at least one or no value at all in a specific field. (Simon Willnauer,
   Uwe Schindler, Robert Muir)
+
+* LUCENE-3586: CheckIndex and IndexUpgrader allow you to specify the
+  specific FSDirectory implementation to use (with the new -dir-impl
+  command-line option).  (Luca Cavanna via Mike McCandless)
   
 Bug fixes
 
@@ -731,6 +739,10 @@ Bug fixes
 * LUCENE-3641: Fixed MultiReader to correctly propagate readerFinishedListeners
   to clones/reopened readers.  (Uwe Schindler)
 
+* LUCENE-3642: Fixed bugs in CharTokenizer, n-gram filters, and smart chinese 
+  where they would create invalid offsets in some situations, leading to problems
+  in highlighting. (Max Beutel via Robert Muir)
+
 Documentation
 
 * LUCENE-3597: Fixed incorrect grouping documentation. (Martijn van Groningen, Robert Muir)

Modified: lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/CheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/CheckIndex.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/CheckIndex.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/CheckIndex.java Tue Dec 13 18:23:22 2011
@@ -28,7 +28,6 @@ import org.apache.lucene.document.Docume
 import org.apache.lucene.index.DocValues.SortedSource;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.PerDocProducer;
 
 import java.io.File;
 import java.io.IOException;
@@ -41,10 +40,21 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.lucene.document.FieldType; // for javadocs
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.codecs.Codec;
+
 import org.apache.lucene.index.codecs.BlockTreeTermsReader;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CommandLineUtil;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.StringHelper;
 
@@ -1477,41 +1487,48 @@ public class CheckIndex {
     boolean verbose = false;
     List<String> onlySegments = new ArrayList<String>();
     String indexPath = null;
+    String dirImpl = null;
     int i = 0;
     while(i < args.length) {
-      if (args[i].equals("-fix")) {
+      String arg = args[i];
+      if ("-fix".equals(arg)) {
         doFix = true;
-        i++;
-      } else if (args[i].equals("-codec")) {
+      } else if ("-codec".equals(arg)) {
         if (i == args.length-1) {
           System.out.println("ERROR: missing name for -codec option");
           System.exit(1);
         }
-        codec = Codec.forName(args[i+1]);
-        i+=2;
-      } else if (args[i].equals("-verbose")) {
-        verbose = true;
         i++;
-      } else if (args[i].equals("-segment")) {
+        codec = Codec.forName(args[i]);
+      } else if (arg.equals("-verbose")) {
+        verbose = true;
+      } else if (arg.equals("-segment")) {
         if (i == args.length-1) {
           System.out.println("ERROR: missing name for -segment option");
           System.exit(1);
         }
-        onlySegments.add(args[i+1]);
-        i += 2;
+        i++;
+        onlySegments.add(args[i]);
+      } else if ("-dir-impl".equals(arg)) {
+        if (i == args.length - 1) {
+          System.out.println("ERROR: missing value for -dir-impl option");
+          System.exit(1);
+        }
+        i++;
+        dirImpl = args[i];
       } else {
         if (indexPath != null) {
           System.out.println("ERROR: unexpected extra argument '" + args[i] + "'");
           System.exit(1);
         }
         indexPath = args[i];
-        i++;
       }
+      i++;
     }
 
     if (indexPath == null) {
       System.out.println("\nERROR: index path not specified");
-      System.out.println("\nUsage: java org.apache.lucene.index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]\n" +
+      System.out.println("\nUsage: java org.apache.lucene.index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y] [-dir-impl X]\n" +
                          "\n" +
                          "  -fix: actually write a new segments_N file, removing any problematic segments\n" +
                          "  -codec X: when fixing, codec to write the new segments_N file with\n" +
@@ -1519,7 +1536,8 @@ public class CheckIndex {
                          "  -segment X: only check the specified segments.  This can be specified multiple\n" + 
                          "              times, to check more than one segment, eg '-segment _2 -segment _a'.\n" +
                          "              You can't use this with the -fix option\n" +
-                         "\n" + 
+                         "  -dir-impl X: use a specific " + FSDirectory.class.getSimpleName() + " implementation. " +
+                         		"If no package is specified the " + FSDirectory.class.getPackage().getName() + " package will be used.\n" +
                          "**WARNING**: -fix should only be used on an emergency basis as it will cause\n" +
                          "documents (perhaps many) to be permanently removed from the index.  Always make\n" +
                          "a backup copy of your index before running this!  Do not run this tool on an index\n" +
@@ -1549,7 +1567,11 @@ public class CheckIndex {
     System.out.println("\nOpening index @ " + indexPath + "\n");
     Directory dir = null;
     try {
-      dir = FSDirectory.open(new File(indexPath));
+      if (dirImpl == null) {
+        dir = FSDirectory.open(new File(indexPath));
+      } else {
+        dir = CommandLineUtil.newFSDirectory(dirImpl, new File(indexPath));
+      }
     } catch (Throwable t) {
       System.out.println("ERROR: could not open directory \"" + indexPath + "\"; exiting");
       t.printStackTrace(System.out);

Modified: lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexReader.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexReader.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexReader.java Tue Dec 13 18:23:22 2011
@@ -34,6 +34,7 @@ import org.apache.lucene.store.*;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CommandLineUtil;
 import org.apache.lucene.util.ReaderUtil;         // for javadocs
 
 /** IndexReader is an abstract class, providing an interface for accessing an
@@ -965,17 +966,28 @@ public abstract class IndexReader implem
   public static void main(String [] args) {
     String filename = null;
     boolean extract = false;
+    String dirImpl = null;
 
-    for (int i = 0; i < args.length; ++i) {
-      if (args[i].equals("-extract")) {
+    int j = 0;
+    while(j < args.length) {
+      String arg = args[j];
+      if ("-extract".equals(arg)) {
         extract = true;
+      } else if ("-dir-impl".equals(arg)) {
+        if (j == args.length - 1) {
+          System.out.println("ERROR: missing value for -dir-impl option");
+          System.exit(1);
+        }
+        j++;
+        dirImpl = args[j];
       } else if (filename == null) {
-        filename = args[i];
+        filename = arg;
       }
+      j++;
     }
 
     if (filename == null) {
-      System.out.println("Usage: org.apache.lucene.index.IndexReader [-extract] <cfsfile>");
+      System.out.println("Usage: org.apache.lucene.index.IndexReader [-extract] [-dir-impl X] <cfsfile>");
       return;
     }
 
@@ -987,7 +999,12 @@ public abstract class IndexReader implem
       File file = new File(filename);
       String dirname = file.getAbsoluteFile().getParent();
       filename = file.getName();
-      dir = FSDirectory.open(new File(dirname));
+      if (dirImpl == null) {
+        dir = FSDirectory.open(new File(dirname));
+      } else {
+        dir = CommandLineUtil.newFSDirectory(dirImpl, new File(dirname));
+      }
+      
       cfr = new CompoundFileDirectory(dir, filename, IOContext.DEFAULT, false);
 
       String [] files = cfr.listAll();

Modified: lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java Tue Dec 13 18:23:22 2011
@@ -19,6 +19,7 @@ package org.apache.lucene.index;
 
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.util.CommandLineUtil;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.Version;
@@ -54,36 +55,56 @@ public final class IndexUpgrader {
   private static void printUsage() {
     System.err.println("Upgrades an index so all segments created with a previous Lucene version are rewritten.");
     System.err.println("Usage:");
-    System.err.println("  java " + IndexUpgrader.class.getName() + " [-delete-prior-commits] [-verbose] indexDir");
+    System.err.println("  java " + IndexUpgrader.class.getName() + " [-delete-prior-commits] [-verbose] [-dir-impl X] indexDir");
     System.err.println("This tool keeps only the last commit in an index; for this");
     System.err.println("reason, if the incoming index has more than one commit, the tool");
     System.err.println("refuses to run by default. Specify -delete-prior-commits to override");
     System.err.println("this, allowing the tool to delete all but the last commit.");
+    System.err.println("Specify a " + FSDirectory.class.getSimpleName() + 
+        " implementation through the -dir-impl option to force its use. If no package is specified the " 
+        + FSDirectory.class.getPackage().getName() + " package will be used.");
     System.err.println("WARNING: This tool may reorder document IDs!");
     System.exit(1);
   }
 
   @SuppressWarnings("deprecation")
   public static void main(String[] args) throws IOException {
-    String dir = null;
+    String path = null;
     boolean deletePriorCommits = false;
     PrintStream out = null;
-    for (String arg : args) {
+    String dirImpl = null;
+    int i = 0;
+    while (i<args.length) {
+      String arg = args[i];
       if ("-delete-prior-commits".equals(arg)) {
         deletePriorCommits = true;
       } else if ("-verbose".equals(arg)) {
         out = System.out;
-      } else if (dir == null) {
-        dir = arg;
-      } else {
+      } else if (path == null) {
+        path = arg;
+      } else if ("-dir-impl".equals(arg)) {
+        if (i == args.length - 1) {
+          System.out.println("ERROR: missing value for -dir-impl option");
+          System.exit(1);
+        }
+        i++;
+        dirImpl = args[i];
+      }else {
         printUsage();
       }
+      i++;
     }
-    if (dir == null) {
+    if (path == null) {
       printUsage();
     }
     
-    new IndexUpgrader(FSDirectory.open(new File(dir)), Version.LUCENE_CURRENT, out, deletePriorCommits).upgrade();
+    Directory dir = null;
+    if (dirImpl == null) {
+      dir = FSDirectory.open(new File(path));
+    } else {
+      dir = CommandLineUtil.newFSDirectory(dirImpl, new File(path));
+    }
+    new IndexUpgrader(dir, Version.LUCENE_CURRENT, out, deletePriorCommits).upgrade();
   }
   
   private final Directory dir;

Modified: lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java Tue Dec 13 18:23:22 2011
@@ -38,28 +38,27 @@ public class CachingWrapperFilter extend
   Filter filter;
 
   protected final FilterCache<DocIdSet> cache;
+  private final boolean recacheDeletes;
 
-  static class FilterCache<T> {
+  private static class FilterCache<T> {
 
     /**
      * A transient Filter cache (package private because of test)
      */
-    // NOTE: not final so that we can dynamically re-init
-    // after de-serialize
-    transient Map<Object,T> cache;
+    private final Map<Object,Map<Object,T>> cache = new WeakHashMap<Object,Map<Object,T>>();
 
-    public synchronized T get(IndexReader reader, Object coreKey) throws IOException {
-      T value;
-
-      if (cache == null) {
-        cache = new WeakHashMap<Object,T>();
+    public synchronized T get(IndexReader reader, Object coreKey, Object coreSubKey) throws IOException {
+      Map<Object,T> innerCache = cache.get(coreKey);
+      if (innerCache == null) {
+        innerCache = new WeakHashMap<Object,T>();
+        cache.put(coreKey, innerCache);
       }
 
-      return cache.get(coreKey);
+      return innerCache.get(coreSubKey);
     }
 
-    public synchronized void put(Object coreKey, T value) {
-      cache.put(coreKey, value);
+    public synchronized void put(Object coreKey, Object coreSubKey, T value) {
+      cache.get(coreKey).put(coreSubKey, value);
     }
   }
 
@@ -67,7 +66,19 @@ public class CachingWrapperFilter extend
    * @param filter Filter to cache results of
    */
   public CachingWrapperFilter(Filter filter) {
+    this(filter, false);
+  }
+
+  /** Wraps another filter's result and caches it.  If
+   *  recacheDeletes is true, then new deletes (for example
+   *  after {@link IndexReader#openIfChanged}) will be AND'd
+   *  and cached again.
+   *
+   *  @param filter Filter to cache results of
+   */
+  public CachingWrapperFilter(Filter filter, boolean recacheDeletes) {
     this.filter = filter;
+    this.recacheDeletes = recacheDeletes;
     cache = new FilterCache<DocIdSet>();
   }
 
@@ -106,33 +117,48 @@ public class CachingWrapperFilter extend
     final IndexReader reader = context.reader;
     final Object coreKey = reader.getCoreCacheKey();
 
-    DocIdSet docIdSet = cache.get(reader, coreKey);
+    // Only cache if incoming acceptDocs is == live docs;
+    // if Lucene passes in more interesting acceptDocs in
+    // the future we don't want to over-cache:
+    final boolean doCacheSubAcceptDocs = recacheDeletes && acceptDocs == reader.getLiveDocs();
+
+    final Bits subAcceptDocs;
+    if (doCacheSubAcceptDocs) {
+      subAcceptDocs = acceptDocs;
+    } else {
+      subAcceptDocs = null;
+    }
+
+    DocIdSet docIdSet = cache.get(reader, coreKey, subAcceptDocs);
     if (docIdSet != null) {
       hitCount++;
     } else {
       missCount++;
-      // cache miss: we use no acceptDocs here
-      // (this saves time on building DocIdSet, the acceptDocs will be applied on the cached set)
-      docIdSet = docIdSetToCache(filter.getDocIdSet(context, null/**!!!*/), reader);
-      cache.put(coreKey, docIdSet);
+      docIdSet = docIdSetToCache(filter.getDocIdSet(context, subAcceptDocs), reader);
+      cache.put(coreKey, subAcceptDocs, docIdSet);
+    }
+
+    if (doCacheSubAcceptDocs) {
+      return docIdSet;
+    } else {
+      return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
     }
-    
-    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
   }
 
   @Override
   public String toString() {
-    return "CachingWrapperFilter("+filter+")";
+    return "CachingWrapperFilter("+filter+",recacheDeletes=" + recacheDeletes + ")";
   }
 
   @Override
   public boolean equals(Object o) {
     if (!(o instanceof CachingWrapperFilter)) return false;
-    return this.filter.equals(((CachingWrapperFilter)o).filter);
+    final CachingWrapperFilter other = (CachingWrapperFilter) o;
+    return this.filter.equals(other.filter) && this.recacheDeletes == other.recacheDeletes;
   }
 
   @Override
   public int hashCode() {
-    return filter.hashCode() ^ 0x1117BF25;  
+    return (filter.hashCode() ^ 0x1117BF25) + (recacheDeletes ? 0 : 1);
   }
 }

Modified: lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/FilteredQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/FilteredQuery.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/FilteredQuery.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/java/org/apache/lucene/search/FilteredQuery.java Tue Dec 13 18:23:22 2011
@@ -33,25 +33,23 @@ import java.util.Set;
  * <p>Note: the bits are retrieved from the filter each time this
  * query is used in a search - use a CachingWrapperFilter to avoid
  * regenerating the bits every time.
- *
- * <p>Created: Apr 20, 2004 8:58:29 AM
- *
  * @since   1.4
  * @see     CachingWrapperFilter
  */
-public class FilteredQuery
-extends Query {
+public class FilteredQuery extends Query {
 
-  Query query;
-  Filter filter;
+  private final Query query;
+  private final Filter filter;
 
   /**
    * Constructs a new query which applies a filter to the results of the original query.
-   * Filter.getDocIdSet() will be called every time this query is used in a search.
+   * {@link Filter#getDocIdSet} will be called every time this query is used in a search.
    * @param query  Query to be filtered, cannot be <code>null</code>.
    * @param filter Filter to apply to query results, cannot be <code>null</code>.
    */
   public FilteredQuery (Query query, Filter filter) {
+    if (query == null || filter == null)
+      throw new IllegalArgumentException("Query and filter cannot be null.");
     this.query = query;
     this.filter = filter;
   }
@@ -229,31 +227,45 @@ extends Query {
     };
   }
 
-  /** Rewrites the wrapped query. */
+  /** Rewrites the query. If the wrapped is an instance of
+   * {@link MatchAllDocsQuery} it returns a {@link ConstantScoreQuery}. Otherwise
+   * it returns a new {@code FilteredQuery} wrapping the rewritten query. */
   @Override
   public Query rewrite(IndexReader reader) throws IOException {
-    Query rewritten = query.rewrite(reader);
-    if (rewritten != query) {
-      FilteredQuery clone = (FilteredQuery)this.clone();
-      clone.query = rewritten;
-      return clone;
+    final Query queryRewritten = query.rewrite(reader);
+    
+    if (queryRewritten instanceof MatchAllDocsQuery) {
+      // Special case: If the query is a MatchAllDocsQuery, we only
+      // return a CSQ(filter).
+      final Query rewritten = new ConstantScoreQuery(filter);
+      // Combine boost of MatchAllDocsQuery and the wrapped rewritten query:
+      rewritten.setBoost(this.getBoost() * queryRewritten.getBoost());
+      return rewritten;
+    }
+    
+    if (queryRewritten != query) {
+      // rewrite to a new FilteredQuery wrapping the rewritten query
+      final Query rewritten = new FilteredQuery(queryRewritten, filter);
+      rewritten.setBoost(this.getBoost());
+      return rewritten;
     } else {
+      // nothing to rewrite, we are done!
       return this;
     }
   }
 
-  public Query getQuery() {
+  public final Query getQuery() {
     return query;
   }
 
-  public Filter getFilter() {
+  public final Filter getFilter() {
     return filter;
   }
 
   // inherit javadoc
   @Override
   public void extractTerms(Set<Term> terms) {
-      getQuery().extractTerms(terms);
+    getQuery().extractTerms(terms);
   }
 
   /** Prints a user-readable version of this query. */
@@ -271,16 +283,21 @@ extends Query {
   /** Returns true iff <code>o</code> is equal to this. */
   @Override
   public boolean equals(Object o) {
-    if (o instanceof FilteredQuery) {
-      FilteredQuery fq = (FilteredQuery) o;
-      return (query.equals(fq.query) && filter.equals(fq.filter) && getBoost()==fq.getBoost());
-    }
-    return false;
+    if (o == this)
+      return true;
+    if (!super.equals(o))
+      return false;
+    assert o instanceof FilteredQuery;
+    final FilteredQuery fq = (FilteredQuery) o;
+    return fq.query.equals(this.query) && fq.filter.equals(this.filter);
   }
 
   /** Returns a hash code value for this object. */
   @Override
   public int hashCode() {
-    return query.hashCode() ^ filter.hashCode() + Float.floatToRawIntBits(getBoost());
+    int hash = super.hashCode();
+    hash = hash * 31 + query.hashCode();
+    hash = hash * 31 + filter.hashCode();
+    return hash;
   }
 }

Modified: lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java Tue Dec 13 18:23:22 2011
@@ -135,6 +135,10 @@ public abstract class BaseTokenStreamTes
         assertTrue("startOffset must be >= 0", offsetAtt.startOffset() >= 0);
         assertTrue("endOffset must be >= 0", offsetAtt.endOffset() >= 0);
         assertTrue("endOffset must be >= startOffset", offsetAtt.endOffset() >= offsetAtt.startOffset());
+        if (finalOffset != null) {
+          assertTrue("startOffset must be <= finalOffset", offsetAtt.startOffset() <= finalOffset.intValue());
+          assertTrue("endOffset must be <= finalOffset", offsetAtt.endOffset() <= finalOffset.intValue());
+        }
       }
       if (posIncrAtt != null) {
         assertTrue("posIncrement must be >= 0", posIncrAtt.getPositionIncrement() >= 0);

Modified: lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java Tue Dec 13 18:23:22 2011
@@ -79,8 +79,8 @@ public class QueryUtils {
   }
 
   public static void checkUnequal(Query q1, Query q2) {
-    Assert.assertTrue(!q1.equals(q2));
-    Assert.assertTrue(!q2.equals(q1));
+    Assert.assertFalse(q1 + " equal to " + q2, q1.equals(q2));
+    Assert.assertFalse(q2 + " equal to " + q1, q2.equals(q1));
 
     // possible this test can fail on a hash collision... if that
     // happens, please change test to use a different example.

Modified: lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java Tue Dec 13 18:23:22 2011
@@ -26,7 +26,6 @@ import java.lang.annotation.Inherited;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
-import java.lang.reflect.Constructor;
 import java.util.*;
 import java.util.Map.Entry;
 import java.util.concurrent.ExecutorService;
@@ -1035,24 +1034,16 @@ public abstract class LuceneTestCase ext
       fsdirClass = FS_DIRECTORIES[random.nextInt(FS_DIRECTORIES.length)];
     }
 
-    if (fsdirClass.indexOf(".") == -1) {// if not fully qualified, assume .store
-      fsdirClass = "org.apache.lucene.store." + fsdirClass;
-    }
-
     Class<? extends FSDirectory> clazz;
     try {
       try {
-        clazz = Class.forName(fsdirClass).asSubclass(FSDirectory.class);
+        clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass);
       } catch (ClassCastException e) {
         // TEST_DIRECTORY is not a sub-class of FSDirectory, so draw one at random
         fsdirClass = FS_DIRECTORIES[random.nextInt(FS_DIRECTORIES.length)];
-
-        if (fsdirClass.indexOf(".") == -1) {// if not fully qualified, assume .store
-          fsdirClass = "org.apache.lucene.store." + fsdirClass;
-        }
-
-        clazz = Class.forName(fsdirClass).asSubclass(FSDirectory.class);
+        clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass);
       }
+      
       MockDirectoryWrapper dir = new MockDirectoryWrapper(random, newFSDirectoryImpl(clazz, f));
       if (lf != null) {
         dir.setLockFactory(lf);
@@ -1165,10 +1156,7 @@ public abstract class LuceneTestCase ext
       throws IOException {
     FSDirectory d = null;
     try {
-      // Assuming every FSDirectory has a ctor(File), but not all may take a
-      // LockFactory too, so setting it afterwards.
-      Constructor<? extends FSDirectory> ctor = clazz.getConstructor(File.class);
-      d = ctor.newInstance(file);
+      d = CommandLineUtil.newFSDirectory(clazz, file);
     } catch (Exception e) {
       d = FSDirectory.open(file);
     }
@@ -1186,12 +1174,12 @@ public abstract class LuceneTestCase ext
   }
   
   static Directory newDirectoryImpl(Random random, String clazzName) {
-    if (clazzName.equals("random"))
+    if (clazzName.equals("random")) {
       clazzName = randomDirectory(random);
-    if (clazzName.indexOf(".") == -1) // if not fully qualified, assume .store
-      clazzName = "org.apache.lucene.store." + clazzName;
+    }
+    
     try {
-      final Class<? extends Directory> clazz = Class.forName(clazzName).asSubclass(Directory.class);
+      final Class<? extends Directory> clazz = CommandLineUtil.loadDirectoryClass(clazzName);
       // If it is a FSDirectory type, try its ctor(File)
       if (FSDirectory.class.isAssignableFrom(clazz)) {
         final File dir = _TestUtil.getTempDir("index");

Modified: lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java Tue Dec 13 18:23:22 2011
@@ -30,8 +30,9 @@ import org.apache.lucene.index.SlowMulti
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
 
 public class TestCachingWrapperFilter extends LuceneTestCase {
   
@@ -164,6 +165,7 @@ public class TestCachingWrapperFilter ex
             // asserts below requires no unexpected merges:
             setMergePolicy(newLogMergePolicy(10))
     );
+    _TestUtil.keepFullyDeletedSegments(writer.w);
 
     // NOTE: cannot use writer.getReader because RIW (on
     // flipping a coin) may give us a newly opened reader,
@@ -173,7 +175,7 @@ public class TestCachingWrapperFilter ex
     // same reason we don't wrap?
     IndexSearcher searcher = newSearcher(reader, false);
 
-    // add a doc, refresh the reader, and check that its there
+    // add a doc, refresh the reader, and check that it's there
     Document doc = new Document();
     doc.add(newField("id", "1", StringField.TYPE_STORED));
     writer.addDocument(doc);
@@ -186,25 +188,78 @@ public class TestCachingWrapperFilter ex
 
     final Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));
 
-    CachingWrapperFilter filter = new CachingWrapperFilter(startFilter);
+    // force cache to regenerate after deletions:
+    CachingWrapperFilter filter = new CachingWrapperFilter(startFilter, true);
 
     docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+
     assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
-    int missCount = filter.missCount;
-    assertTrue(missCount > 0);
+
     Query constantScore = new ConstantScoreQuery(filter);
     docs = searcher.search(constantScore, 1);
     assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+
+    // make sure we get a cache hit when we reopen reader
+    // that had no change to deletions
+
+    // fake delete (deletes nothing):
+    writer.deleteDocuments(new Term("foo", "bar"));
+
+    IndexReader oldReader = reader;
+    reader = refreshReader(reader);
+    assertTrue(reader == oldReader);
+    int missCount = filter.missCount;
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+
+    // cache hit:
     assertEquals(missCount, filter.missCount);
 
+    // now delete the doc, refresh the reader, and see that it's not there
+    writer.deleteDocuments(new Term("id", "1"));
+
     // NOTE: important to hold ref here so GC doesn't clear
     // the cache entry!  Else the assert below may sometimes
     // fail:
-    IndexReader oldReader = reader;
+    oldReader = reader;
+    reader = refreshReader(reader);
+
+    searcher = newSearcher(reader, false);
+
+    missCount = filter.missCount;
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
 
+    // cache miss, because we asked CWF to recache when
+    // deletes changed:
+    assertEquals(missCount+1, filter.missCount);
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);
+
+    // apply deletes dynamically:
+    filter = new CachingWrapperFilter(startFilter);
     writer.addDocument(doc);
     reader = refreshReader(reader);
     searcher = newSearcher(reader, false);
+
+    docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
+    assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
+    missCount = filter.missCount;
+    assertTrue(missCount > 0);
+    constantScore = new ConstantScoreQuery(filter);
+    docs = searcher.search(constantScore, 1);
+    assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
+    assertEquals(missCount, filter.missCount);
+
+    writer.addDocument(doc);
+
+    // NOTE: important to hold ref here so GC doesn't clear
+    // the cache entry!  Else the assert below may sometimes
+    // fail:
+    oldReader = reader;
+
+    reader = refreshReader(reader);
+    searcher = newSearcher(reader, false);
         
     docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
     assertEquals("[query + filter] Should find 2 hits...", 2, docs.totalHits);
@@ -216,11 +271,6 @@ public class TestCachingWrapperFilter ex
     assertEquals("[just filter] Should find a hit...", 2, docs.totalHits);
     assertEquals(missCount, filter.missCount);
 
-    // NOTE: important to hold ref here so GC doesn't clear
-    // the cache entry!  Else the assert below may sometimes
-    // fail:
-    IndexReader oldReader2 = reader;
-
     // now delete the doc, refresh the reader, and see that it's not there
     writer.deleteDocuments(new Term("id", "1"));
 
@@ -229,10 +279,12 @@ public class TestCachingWrapperFilter ex
 
     docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
     assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
+    // CWF reused the same entry (it dynamically applied the deletes):
     assertEquals(missCount, filter.missCount);
 
     docs = searcher.search(constantScore, 1);
     assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);
+    // CWF reused the same entry (it dynamically applied the deletes):
     assertEquals(missCount, filter.missCount);
 
     // NOTE: silliness to make sure JRE does not eliminate
@@ -240,7 +292,6 @@ public class TestCachingWrapperFilter ex
     // CachingWrapperFilter's WeakHashMap from dropping the
     // entry:
     assertTrue(oldReader != null);
-    assertTrue(oldReader2 != null);
 
     reader.close();
     writer.close();

Modified: lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (original)
+++ lucene/dev/branches/lucene3622/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java Tue Dec 13 18:23:22 2011
@@ -132,6 +132,11 @@ public class TestFilteredQuery extends L
     assertEquals (2, hits.length);
     QueryUtils.check(random, filteredquery,searcher);
 
+    filteredquery = new FilteredQueryRA(new MatchAllDocsQuery(), filter, useRandomAccess);
+    hits = searcher.search (filteredquery, null, 1000).scoreDocs;
+    assertEquals (2, hits.length);
+    QueryUtils.check(random, filteredquery,searcher);
+
     filteredquery = new FilteredQueryRA(new TermQuery (new Term ("field", "x")), filter, useRandomAccess);
     hits = searcher.search (filteredquery, null, 1000).scoreDocs;
     assertEquals (1, hits.length);
@@ -220,9 +225,9 @@ public class TestFilteredQuery extends L
 
   private void tBooleanMUST(final boolean useRandomAccess) throws Exception {
     BooleanQuery bq = new BooleanQuery();
-    Query query = new FilteredQueryRA(new MatchAllDocsQuery(), new SingleDocTestFilter(0), useRandomAccess);
+    Query query = new FilteredQueryRA(new TermQuery(new Term("field", "one")), new SingleDocTestFilter(0), useRandomAccess);
     bq.add(query, BooleanClause.Occur.MUST);
-    query = new FilteredQueryRA(new MatchAllDocsQuery(), new SingleDocTestFilter(1), useRandomAccess);
+    query = new FilteredQueryRA(new TermQuery(new Term("field", "one")), new SingleDocTestFilter(1), useRandomAccess);
     bq.add(query, BooleanClause.Occur.MUST);
     ScoreDoc[] hits = searcher.search(bq, null, 1000).scoreDocs;
     assertEquals(0, hits.length);
@@ -238,9 +243,9 @@ public class TestFilteredQuery extends L
 
   private void tBooleanSHOULD(final boolean useRandomAccess) throws Exception {
     BooleanQuery bq = new BooleanQuery();
-    Query query = new FilteredQueryRA(new MatchAllDocsQuery(), new SingleDocTestFilter(0), useRandomAccess);
+    Query query = new FilteredQueryRA(new TermQuery(new Term("field", "one")), new SingleDocTestFilter(0), useRandomAccess);
     bq.add(query, BooleanClause.Occur.SHOULD);
-    query = new FilteredQueryRA(new MatchAllDocsQuery(), new SingleDocTestFilter(1), useRandomAccess);
+    query = new FilteredQueryRA(new TermQuery(new Term("field", "one")), new SingleDocTestFilter(1), useRandomAccess);
     bq.add(query, BooleanClause.Occur.SHOULD);
     ScoreDoc[] hits = searcher.search(bq, null, 1000).scoreDocs;
     assertEquals(2, hits.length);
@@ -288,6 +293,76 @@ public class TestFilteredQuery extends L
     assertEquals(1, hits.length);
     QueryUtils.check(random, query, searcher);    
   }
+  
+  public void testEqualsHashcode() throws Exception {
+    // some tests before, if the used queries and filters work:
+    assertEquals(new PrefixFilter(new Term("field", "o")), new PrefixFilter(new Term("field", "o")));
+    assertFalse(new PrefixFilter(new Term("field", "a")).equals(new PrefixFilter(new Term("field", "o"))));
+    QueryUtils.checkHashEquals(new TermQuery(new Term("field", "one")));
+    QueryUtils.checkUnequal(
+      new TermQuery(new Term("field", "one")), new TermQuery(new Term("field", "two"))
+    );
+    // now test FilteredQuery equals/hashcode:
+    QueryUtils.checkHashEquals(new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o"))));
+    QueryUtils.checkUnequal(
+      new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o"))), 
+      new FilteredQuery(new TermQuery(new Term("field", "two")), new PrefixFilter(new Term("field", "o")))
+    );
+    QueryUtils.checkUnequal(
+      new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "a"))), 
+      new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o")))
+    );
+  }
+  
+  public void testInvalidArguments() throws Exception {
+    try {
+      new FilteredQuery(null, null);
+      fail("Should throw IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {
+      // pass
+    }
+    try {
+      new FilteredQuery(new TermQuery(new Term("field", "one")), null);
+      fail("Should throw IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {
+      // pass
+    }
+    try {
+      new FilteredQuery(null, new PrefixFilter(new Term("field", "o")));
+      fail("Should throw IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {
+      // pass
+    }
+  }
+  
+  private void assertRewrite(FilteredQuery fq, Class<? extends Query> clazz) throws Exception {
+    // assign crazy boost to FQ
+    final float boost = random.nextFloat() * 100.f;
+    fq.setBoost(boost);
+    
+    // assign crazy boost to inner
+    final float innerBoost = random.nextFloat() * 100.f;
+    fq.getQuery().setBoost(innerBoost);
+    
+    // check the class and boosts of rewritten query
+    final Query rewritten = searcher.rewrite(fq);
+    assertTrue("is not instance of " + clazz.getName(), clazz.isInstance(rewritten));
+    if (rewritten instanceof FilteredQuery) {
+      assertEquals(boost, rewritten.getBoost(), 1.E-5f);
+      assertEquals(innerBoost, ((FilteredQuery) rewritten).getQuery().getBoost(), 1.E-5f);
+    } else {
+      assertEquals(boost * innerBoost, rewritten.getBoost(), 1.E-5f);
+    }
+    
+    // check that the original query was not modified
+    assertEquals(boost, fq.getBoost(), 1.E-5f);
+    assertEquals(innerBoost, fq.getQuery().getBoost(), 1.E-5f);
+  }
+
+  public void testRewrite() throws Exception {
+    assertRewrite(new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o"))), FilteredQuery.class);
+    assertRewrite(new FilteredQuery(new MatchAllDocsQuery(), new PrefixFilter(new Term("field", "o"))), ConstantScoreQuery.class);
+  }
 
   public static final class FilteredQueryRA extends FilteredQuery {
     private final boolean useRandomAccess;

Modified: lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (original)
+++ lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java Tue Dec 13 18:23:22 2011
@@ -71,6 +71,8 @@ public final class EdgeNGramTokenFilter 
   private int curTermLength;
   private int curGramSize;
   private int tokStart;
+  private int tokEnd; // only used if the length changed before this filter
+  private boolean hasIllegalOffsets; // only used if the length changed before this filter
   
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@@ -126,6 +128,10 @@ public final class EdgeNGramTokenFilter 
           curTermLength = termAtt.length();
           curGramSize = minGram;
           tokStart = offsetAtt.startOffset();
+          tokEnd = offsetAtt.endOffset();
+          // if length by start + end offsets doesn't match the term text then assume
+          // this is a synonym and don't adjust the offsets.
+          hasIllegalOffsets = (tokStart + curTermLength) != tokEnd;
         }
       }
       if (curGramSize <= maxGram) {
@@ -135,7 +141,11 @@ public final class EdgeNGramTokenFilter 
           int start = side == Side.FRONT ? 0 : curTermLength - curGramSize;
           int end = start + curGramSize;
           clearAttributes();
-          offsetAtt.setOffset(tokStart + start, tokStart + end);
+          if (hasIllegalOffsets) {
+            offsetAtt.setOffset(tokStart, tokEnd);
+          } else {
+            offsetAtt.setOffset(tokStart + start, tokStart + end);
+          }
           termAtt.copyBuffer(curTermBuffer, start, curGramSize);
           curGramSize++;
           return true;

Modified: lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java (original)
+++ lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java Tue Dec 13 18:23:22 2011
@@ -38,6 +38,8 @@ public final class NGramTokenFilter exte
   private int curGramSize;
   private int curPos;
   private int tokStart;
+  private int tokEnd; // only used if the length changed before this filter
+  private boolean hasIllegalOffsets; // only used if the length changed before this filter
   
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@@ -81,13 +83,21 @@ public final class NGramTokenFilter exte
           curGramSize = minGram;
           curPos = 0;
           tokStart = offsetAtt.startOffset();
+          tokEnd = offsetAtt.endOffset();
+          // if length by start + end offsets doesn't match the term text then assume
+          // this is a synonym and don't adjust the offsets.
+          hasIllegalOffsets = (tokStart + curTermLength) != tokEnd;
         }
       }
       while (curGramSize <= maxGram) {
         while (curPos+curGramSize <= curTermLength) {     // while there is input
           clearAttributes();
           termAtt.copyBuffer(curTermBuffer, curPos, curGramSize);
-          offsetAtt.setOffset(tokStart + curPos, tokStart + curPos + curGramSize);
+          if (hasIllegalOffsets) {
+            offsetAtt.setOffset(tokStart, tokEnd);
+          } else {
+            offsetAtt.setOffset(tokStart + curPos, tokStart + curPos + curGramSize);
+          }
           curPos++;
           return true;
         }

Modified: lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java (original)
+++ lucene/dev/branches/lucene3622/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java Tue Dec 13 18:23:22 2011
@@ -144,6 +144,7 @@ public abstract class CharTokenizer exte
     clearAttributes();
     int length = 0;
     int start = -1; // this variable is always initialized
+    int end = -1;
     char[] buffer = termAtt.buffer();
     while (true) {
       if (bufferIndex >= dataLen) {
@@ -162,15 +163,18 @@ public abstract class CharTokenizer exte
       }
       // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone
       final int c = charUtils.codePointAt(ioBuffer.getBuffer(), bufferIndex);
-      bufferIndex += Character.charCount(c);
+      final int charCount = Character.charCount(c);
+      bufferIndex += charCount;
 
       if (isTokenChar(c)) {               // if it's a token char
         if (length == 0) {                // start of token
           assert start == -1;
-          start = offset + bufferIndex - 1;
+          start = offset + bufferIndex - charCount;
+          end = start;
         } else if (length >= buffer.length-1) { // check if a supplementary could run out of bounds
           buffer = termAtt.resizeBuffer(2+length); // make sure a supplementary fits in the buffer
         }
+        end += charCount;
         length += Character.toChars(normalize(c), buffer, length); // buffer it, normalized
         if (length >= MAX_WORD_LEN) // buffer overflow! make sure to check for >= surrogate pair could break == test
           break;
@@ -180,7 +184,7 @@ public abstract class CharTokenizer exte
 
     termAtt.setLength(length);
     assert start != -1;
-    offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(start+length));
+    offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(end));
     return true;
     
   }

Modified: lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (original)
+++ lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java Tue Dec 13 18:23:22 2011
@@ -17,11 +17,16 @@ package org.apache.lucene.analysis.ngram
  * limitations under the License.
  */
 
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
 
+import java.io.Reader;
 import java.io.StringReader;
 
 /**
@@ -104,4 +109,24 @@ public class EdgeNGramTokenFilterTest ex
     tokenizer.reset(new StringReader("abcde"));
     assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
   }
+  
+  // LUCENE-3642
+  // EdgeNgram blindly adds term length to offset, but this can take things out of bounds
+  // wrt original text if a previous filter increases the length of the word (in this case æ -> ae)
+  // so in this case we behave like WDF, and preserve any modified offsets
+  public void testInvalidOffsets() throws Exception {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+        TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
+        filters = new EdgeNGramTokenFilter(filters, EdgeNGramTokenFilter.Side.FRONT, 2, 15);
+        return new TokenStreamComponents(tokenizer, filters);
+      }
+    };
+    assertAnalyzesTo(analyzer, "mosfellsbær",
+        new String[] { "mo", "mos", "mosf", "mosfe", "mosfel", "mosfell", "mosfells", "mosfellsb", "mosfellsba", "mosfellsbae", "mosfellsbaer" },
+        new int[]    {    0,     0,      0,       0,        0,         0,          0,           0,            0,             0,              0 },
+        new int[]    {   11,    11,     11,      11,       11,        11,         11,          11,           11,            11,             11 });
+  }
 }

Modified: lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java (original)
+++ lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java Tue Dec 13 18:23:22 2011
@@ -17,11 +17,16 @@ package org.apache.lucene.analysis.ngram
  * limitations under the License.
  */
 
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
 
+import java.io.Reader;
 import java.io.StringReader;
 
 /**
@@ -93,4 +98,24 @@ public class NGramTokenFilterTest extend
       tokenizer.reset(new StringReader("abcde"));
       assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
     }
+    
+    // LUCENE-3642
+    // NGramTokenFilter blindly adds term length to offset, but this can take things out of bounds
+    // wrt original text if a previous filter increases the length of the word (in this case æ -> ae)
+    // so in this case we behave like WDF, and preserve any modified offsets
+    public void testInvalidOffsets() throws Exception {
+      Analyzer analyzer = new Analyzer() {
+        @Override
+        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+          Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+          TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
+          filters = new NGramTokenFilter(filters, 2, 2);
+          return new TokenStreamComponents(tokenizer, filters);
+        }
+      };
+      assertAnalyzesTo(analyzer, "mosfellsbær",
+          new String[] { "mo", "os", "sf", "fe", "el", "ll", "ls", "sb", "ba", "ae", "er" },
+          new int[]    {    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
+          new int[]    {   11,   11,   11,   11,   11,   11,   11,   11,   11,   11,   11 });
+    }
 }

Modified: lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java (original)
+++ lucene/dev/branches/lucene3622/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java Tue Dec 13 18:23:22 2011
@@ -18,11 +18,17 @@ package org.apache.lucene.analysis.util;
  */
 
 import java.io.IOException;
+import java.io.Reader;
 import java.io.StringReader;
 
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LetterTokenizer;
 import org.apache.lucene.analysis.core.LowerCaseTokenizer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.util._TestUtil;
 
 
 /**
@@ -94,4 +100,80 @@ public class TestCharTokenizers extends 
     Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
     assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
   }
+  
+  // LUCENE-3642: normalize SMP->BMP and check that offsets are correct
+  public void testCrossPlaneNormalization() throws IOException {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer tokenizer = new LetterTokenizer(TEST_VERSION_CURRENT, reader) {
+          @Override
+          protected int normalize(int c) {
+            if (c > 0xffff) {
+              return 'δ';
+            } else {
+              return c;
+            }
+          }
+        };
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+    };
+    int num = 10000 * RANDOM_MULTIPLIER;
+    for (int i = 0; i < num; i++) {
+      String s = _TestUtil.randomUnicodeString(random);
+      TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+      ts.reset();
+      OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
+      while (ts.incrementToken()) {
+        String highlightedText = s.substring(offsetAtt.startOffset(), offsetAtt.endOffset());
+        for (int j = 0, cp = 0; j < highlightedText.length(); j += Character.charCount(cp)) {
+          cp = highlightedText.codePointAt(j);
+          assertTrue("non-letter:" + Integer.toHexString(cp), Character.isLetter(cp));
+        }
+      }
+      ts.end();
+      ts.close();
+    }
+    // just for fun
+    checkRandomData(random, analyzer, num);
+  }
+  
+  // LUCENE-3642: normalize BMP->SMP and check that offsets are correct
+  public void testCrossPlaneNormalization2() throws IOException {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer tokenizer = new LetterTokenizer(TEST_VERSION_CURRENT, reader) {
+          @Override
+          protected int normalize(int c) {
+            if (c <= 0xffff) {
+              return 0x1043C;
+            } else {
+              return c;
+            }
+          }
+        };
+        return new TokenStreamComponents(tokenizer, tokenizer);
+      }
+    };
+    int num = 10000 * RANDOM_MULTIPLIER;
+    for (int i = 0; i < num; i++) {
+      String s = _TestUtil.randomUnicodeString(random);
+      TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+      ts.reset();
+      OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
+      while (ts.incrementToken()) {
+        String highlightedText = s.substring(offsetAtt.startOffset(), offsetAtt.endOffset());
+        for (int j = 0, cp = 0; j < highlightedText.length(); j += Character.charCount(cp)) {
+          cp = highlightedText.codePointAt(j);
+          assertTrue("non-letter:" + Integer.toHexString(cp), Character.isLetter(cp));
+        }
+      }
+      ts.end();
+      ts.close();
+    }
+    // just for fun
+    checkRandomData(random, analyzer, num);
+  }
 }

Modified: lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java (original)
+++ lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java Tue Dec 13 18:23:22 2011
@@ -43,6 +43,10 @@ public final class WordTokenFilter exten
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+  
+  private int tokStart; // only used if the length changed before this filter
+  private int tokEnd; // only used if the length changed before this filter
+  private boolean hasIllegalOffsets; // only if the length changed before this filter
 
   /**
    * Construct a new WordTokenizer.
@@ -59,6 +63,11 @@ public final class WordTokenFilter exten
     if (tokenIter == null || !tokenIter.hasNext()) {
       // there are no remaining tokens from the current sentence... are there more sentences?
       if (input.incrementToken()) {
+        tokStart = offsetAtt.startOffset();
+        tokEnd = offsetAtt.endOffset();
+        // if length by start + end offsets doesn't match the term text then assume
+        // this is a synonym and don't adjust the offsets.
+        hasIllegalOffsets = (tokStart + termAtt.length()) != tokEnd;
         // a new sentence is available: process it.
         tokenBuffer = wordSegmenter.segmentSentence(termAtt.toString(), offsetAtt.startOffset());
         tokenIter = tokenBuffer.iterator();
@@ -77,7 +86,11 @@ public final class WordTokenFilter exten
     // There are remaining tokens from the current sentence, return the next one. 
     SegToken nextWord = tokenIter.next();
     termAtt.copyBuffer(nextWord.charArray, 0, nextWord.charArray.length);
-    offsetAtt.setOffset(nextWord.startOffset, nextWord.endOffset);
+    if (hasIllegalOffsets) {
+      offsetAtt.setOffset(tokStart, tokEnd);
+    } else {
+      offsetAtt.setOffset(nextWord.startOffset, nextWord.endOffset);
+    }
     typeAtt.setType("word");
     return true;
   }

Modified: lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java (original)
+++ lucene/dev/branches/lucene3622/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java Tue Dec 13 18:23:22 2011
@@ -17,11 +17,16 @@
 
 package org.apache.lucene.analysis.cn.smart;
 
+import java.io.Reader;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
 import org.apache.lucene.util.Version;
 
 public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase {
@@ -196,6 +201,24 @@ public class TestSmartChineseAnalyzer ex
     }
   }
   
+  // LUCENE-3642
+  public void testInvalidOffset() throws Exception {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+        TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
+        filters = new WordTokenFilter(filters);
+        return new TokenStreamComponents(tokenizer, filters);
+      }
+    };
+    
+    assertAnalyzesTo(analyzer, "mosfellsbær", 
+        new String[] { "mosfellsbaer" },
+        new int[]    { 0 },
+        new int[]    { 11 });
+  }
+  
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
     checkRandomData(random, new SmartChineseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);

Modified: lucene/dev/branches/lucene3622/solr/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/CHANGES.txt?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/CHANGES.txt (original)
+++ lucene/dev/branches/lucene3622/solr/CHANGES.txt Tue Dec 13 18:23:22 2011
@@ -429,6 +429,10 @@ Other Changes
 ----------------------
 * SOLR-2922: Upgrade commons-io and commons-lang to 2.1 and 2.6, respectively. (koji)
 
+* SOLR-2920: Refactor frequent conditional use of DefaultSolrParams and 
+  AppendedSolrParams into factory methods.
+  (David Smiley via hossman)
+
 ==================  3.5.0  ==================
 
 New Features

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/core/SolrCore.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/core/SolrCore.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/core/SolrCore.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/core/SolrCore.java Tue Dec 13 18:23:22 2011
@@ -613,7 +613,6 @@ public final class SolrCore implements S
       resourceLoader.inform( resourceLoader );
       resourceLoader.inform( this );  // last call before the latch is released.
     } catch (Throwable e) {
-      log.error("Error in constructing the core", e);
       latch.countDown();//release the latch, otherwise we block trying to do the close.  This should be fine, since counting down on a latch of 0 is still fine
       //close down the searcher and any other resources, if it exists, as this is not recoverable
       close();

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/GapFragmenter.java Tue Dec 13 18:23:22 2011
@@ -22,7 +22,6 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.search.highlight.Fragmenter;
 import org.apache.lucene.search.highlight.NullFragmenter;
 import org.apache.lucene.search.highlight.SimpleFragmenter;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.HighlightParams;
 import org.apache.solr.common.params.SolrParams;
 
@@ -31,9 +30,7 @@ public class GapFragmenter extends Highl
   public Fragmenter getFragmenter(String fieldName, SolrParams params )
   {
     numRequests++;
-    if( defaults != null ) {
-      params = new DefaultSolrParams( params, defaults );
-    }
+    params = SolrParams.wrapDefaults(params, defaults);
     
     int fragsize = params.getFieldInt( fieldName, HighlightParams.FRAGSIZE, 100 );
     return (fragsize <= 0) ? new NullFragmenter() : new LuceneGapFragmenter(fragsize);

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/HtmlFormatter.java Tue Dec 13 18:23:22 2011
@@ -18,7 +18,6 @@ package org.apache.solr.highlight;
 
 import org.apache.lucene.search.highlight.Formatter;
 import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.HighlightParams;
 import org.apache.solr.common.params.SolrParams;
 
@@ -30,10 +29,8 @@ public class HtmlFormatter extends Highl
   public Formatter getFormatter(String fieldName, SolrParams params ) 
   {
     numRequests++;
-    if( defaults != null ) {
-      params = new DefaultSolrParams( params, defaults );
-    }
-    
+    params = SolrParams.wrapDefaults(params, defaults);
+
     return new SimpleHTMLFormatter(
         params.getFieldParam(fieldName, HighlightParams.SIMPLE_PRE,  "<em>" ), 
         params.getFieldParam(fieldName, HighlightParams.SIMPLE_POST, "</em>"));

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java Tue Dec 13 18:23:22 2011
@@ -26,7 +26,6 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.search.highlight.Fragmenter;
 import org.apache.lucene.search.highlight.NullFragmenter;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.HighlightParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -66,10 +65,9 @@ public class RegexFragmenter extends Hig
 
   public Fragmenter getFragmenter(String fieldName, SolrParams params )
   { 
-    numRequests++;        
-    if( defaults != null ) {
-      params = new DefaultSolrParams( params, defaults );
-    }
+    numRequests++;
+    params = SolrParams.wrapDefaults(params, defaults);
+
     int fragsize  = params.getFieldInt(   fieldName, HighlightParams.FRAGSIZE,  LuceneRegexFragmenter.DEFAULT_FRAGMENT_SIZE );
     int increment = params.getFieldInt(   fieldName, HighlightParams.INCREMENT, LuceneRegexFragmenter.DEFAULT_INCREMENT_GAP );
     float slop    = params.getFieldFloat( fieldName, HighlightParams.SLOP,      LuceneRegexFragmenter.DEFAULT_SLOP );

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SimpleFragListBuilder.java Tue Dec 13 18:23:22 2011
@@ -18,18 +18,18 @@
 package org.apache.solr.highlight;
 
 import org.apache.lucene.search.vectorhighlight.FragListBuilder;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.SolrParams;
 
 public class SimpleFragListBuilder extends HighlightingPluginBase implements
     SolrFragListBuilder {
 
   public FragListBuilder getFragListBuilder(SolrParams params) {
+    // NOTE: This class (currently) makes no use of params
+    // If that ever changes, it should wrap them with defaults...
+    // params = SolrParams.wrapDefaults(params, defaults)
+
     numRequests++;
-    if( defaults != null ) {
-      params = new DefaultSolrParams( params, defaults );
-    }
-    
+
     return new org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder();
   }
 

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SingleFragListBuilder.java Tue Dec 13 18:23:22 2011
@@ -18,18 +18,18 @@
 package org.apache.solr.highlight;
 
 import org.apache.lucene.search.vectorhighlight.FragListBuilder;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.SolrParams;
 
 public class SingleFragListBuilder extends HighlightingPluginBase implements
     SolrFragListBuilder {
 
   public FragListBuilder getFragListBuilder(SolrParams params) {
+    // NOTE: This class (currently) makes no use of params
+    // If that ever changes, it should wrap them with defaults...
+    // params = SolrParams.wrapDefaults(params, defaults)
+
     numRequests++;
-    if( defaults != null ) {
-      params = new DefaultSolrParams( params, defaults );
-    }
-    
+
     return new org.apache.lucene.search.vectorhighlight.SingleFragListBuilder();
   }
 

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrBoundaryScanner.java Tue Dec 13 18:23:22 2011
@@ -18,7 +18,6 @@
 package org.apache.solr.highlight;
 
 import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.core.SolrInfoMBean;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
@@ -28,9 +27,8 @@ public abstract class SolrBoundaryScanne
 
   public BoundaryScanner getBoundaryScanner(String fieldName, SolrParams params){
     numRequests++;
-    if( defaults != null ) {
-      params = new DefaultSolrParams( params, defaults );
-    }
+    params = SolrParams.wrapDefaults(params, defaults);
+
     return get(fieldName, params);
   }
   

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/highlight/SolrFragmentsBuilder.java Tue Dec 13 18:23:22 2011
@@ -20,7 +20,6 @@ package org.apache.solr.highlight;
 import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
 import org.apache.lucene.search.vectorhighlight.FragmentsBuilder;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.HighlightParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.core.SolrInfoMBean;
@@ -40,9 +39,8 @@ public abstract class SolrFragmentsBuild
    */
   public FragmentsBuilder getFragmentsBuilder(SolrParams params, BoundaryScanner bs) {
     numRequests++;
-    if( defaults != null ) {
-      params = new DefaultSolrParams( params, defaults );
-    }
+    params = SolrParams.wrapDefaults(params, defaults);
+
     return getFragmentsBuilder( params, getPreTags( params, null ), getPostTags( params, null ), bs );
   }
   
@@ -55,9 +53,8 @@ public abstract class SolrFragmentsBuild
   }
   
   private String[] getTags( SolrParams params, String paramName, String fieldName, String def ){
-    if( defaults != null ) {
-      params = new DefaultSolrParams( params, defaults );
-    }
+    params = SolrParams.wrapDefaults(params, defaults);
+
     String value = null;
     if( fieldName == null )
       value = params.get( paramName, def );

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java Tue Dec 13 18:23:22 2011
@@ -23,7 +23,6 @@ import org.apache.lucene.search.BooleanQ
 import org.apache.lucene.search.Query;
 import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.DisMaxParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -81,7 +80,8 @@ public class DisMaxQParser extends QPars
 
   @Override
   public Query parse() throws ParseException {
-    SolrParams solrParams = localParams == null ? params : new DefaultSolrParams(localParams, params);
+    SolrParams solrParams = SolrParams.wrapDefaults(localParams, params);
+
     queryFields = SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.QF));
     if (0 == queryFields.size()) {
       queryFields.put(req.getSchema().getDefaultSearchFieldName(), 1.0f);

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java Tue Dec 13 18:23:22 2011
@@ -31,8 +31,6 @@ import org.apache.lucene.queryparser.cla
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.search.*;
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.DisMaxParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -42,8 +40,6 @@ import org.apache.solr.util.SolrPluginUt
 import org.apache.solr.analysis.*;
 
 import java.util.*;
-import java.io.Reader;
-import java.io.IOException;
 
 /**
  * An advanced multi-field query parser.
@@ -102,7 +98,7 @@ class ExtendedDismaxQParser extends QPar
     SolrParams localParams = getLocalParams();
     SolrParams params = getParams();
     
-    SolrParams solrParams = localParams == null ? params : new DefaultSolrParams(localParams, params);
+    SolrParams solrParams = SolrParams.wrapDefaults(localParams, params);
 
     final String minShouldMatch = 
       DisMaxQParser.parseMinShouldMatch(req.getSchema(), solrParams);

Modified: lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java Tue Dec 13 18:23:22 2011
@@ -25,10 +25,8 @@ import org.apache.lucene.search.BooleanC
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.AppendedSolrParams;
-import org.apache.solr.common.params.DefaultSolrParams;
-import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.params.UpdateParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
@@ -81,15 +79,10 @@ public class SolrPluginUtils {
                                  SolrParams appends, SolrParams invariants) {
 
       SolrParams p = req.getParams();
-      if (defaults != null) {
-        p = new DefaultSolrParams(p,defaults);
-      }
-      if (appends != null) {
-        p = new AppendedSolrParams(p,appends);
-      }
-      if (invariants != null) {
-        p = new DefaultSolrParams(invariants,p);
-      }
+      p = SolrParams.wrapDefaults(p, defaults);
+      p = SolrParams.wrapAppended(p, appends);
+      p = SolrParams.wrapDefaults(invariants, p);
+
       req.setParams(p);
   }
 

Modified: lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java Tue Dec 13 18:23:22 2011
@@ -34,9 +34,7 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LogMergePolicy;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.AppendedSolrParams;
 import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.DefaultSolrParams;
 import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -448,7 +446,7 @@ public class BasicFunctionalityTest exte
     assertEquals(p.getInt("iii",5), 5);
     assertEquals(p.getFieldParam("field1","i"), "555");
 
-    req.setParams(new DefaultSolrParams(p, new MapSolrParams(m)));
+    req.setParams(SolrParams.wrapDefaults(p, new MapSolrParams(m)));
     p = req.getParams();
     assertEquals(req.getOriginalParams().get("s"), "bbb");
     assertEquals(p.get("i"), "555");
@@ -470,7 +468,7 @@ public class BasicFunctionalityTest exte
     more.add("s", "ccc");
     more.add("ss","YYY");
     more.add("xx","XXX");
-    p = new AppendedSolrParams(p, SolrParams.toSolrParams(more));
+    p = SolrParams.wrapAppended(p, SolrParams.toSolrParams(more));
     assertEquals(3, p.getParams("s").length);
     assertEquals("bbb", p.getParams("s")[0]);
     assertEquals("aaa", p.getParams("s")[1]);

Modified: lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java?rev=1213825&r1=1213824&r2=1213825&view=diff
==============================================================================
--- lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java (original)
+++ lucene/dev/branches/lucene3622/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java Tue Dec 13 18:23:22 2011
@@ -30,14 +30,21 @@ public class BadComponentTest extends So
   @Test
   public void testBadElevate() throws Exception {
     try {
+      ignoreException(".*constructing.*");
+      ignoreException(".*QueryElevationComponent.*");
       System.setProperty("elevate.file", "foo.xml");
       initCore("solrconfig-elevate.xml", "schema12.xml");
       assertTrue(false);
-    } catch (Throwable e) {
-      log.error("Exception", e);
-      assertTrue(true);
+    } catch (RuntimeException e) {
+      //TODO: better way of checking this?
+      if (e.getCause() instanceof SolrException){
+        assertTrue(true);
+      } else {
+        assertTrue(false);
+      }
     } finally {
       System.clearProperty("elevate.file");
+      resetExceptionIgnores();
     }
   }
 }