Posted to commits@lucene.apache.org by sh...@apache.org on 2013/05/07 13:21:14 UTC

svn commit: r1479862 [25/38] - in /lucene/dev/branches/lucene4258: ./ dev-tools/ dev-tools/idea/.idea/ dev-tools/idea/.idea/libraries/ dev-tools/maven/ dev-tools/maven/solr/ dev-tools/maven/solr/core/src/java/ dev-tools/maven/solr/solrj/src/java/ dev-t...

Modified: lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java (original)
+++ lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java Tue May  7 11:20:55 2013
@@ -17,20 +17,19 @@ package org.apache.lucene.search;
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.List;
 import java.util.Random;
 import java.util.concurrent.ExecutorService;
-import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util._TestUtil;
 
-/** 
+/**
  * Helper class that adds some extra checks to ensure correct
  * usage of {@code IndexSearcher} and {@code Weight}.
- * TODO: Extend this by more checks, that's just a start.
  */
 public class AssertingIndexSearcher extends IndexSearcher {
   final Random random;
@@ -58,16 +57,7 @@ public class AssertingIndexSearcher exte
   @Override
   public Weight createNormalizedWeight(Query query) throws IOException {
     final Weight w = super.createNormalizedWeight(query);
-    return new Weight() {
-      @Override
-      public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-        return w.explain(context, doc);
-      }
-
-      @Override
-      public Query getQuery() {
-        return w.getQuery();
-      }
+    return new AssertingWeight(random, w) {
 
       @Override
       public void normalize(float norm, float topLevelBoost) {
@@ -75,41 +65,37 @@ public class AssertingIndexSearcher exte
       }
 
       @Override
-      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-          boolean topScorer, Bits acceptDocs) throws IOException {
-        Scorer scorer = w.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
-        if (scorer != null) {
-          // check that scorer obeys disi contract for docID() before next()/advance
-          try {
-            int docid = scorer.docID();
-            assert docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS;
-          } catch (UnsupportedOperationException ignored) {
-            // from a top-level BS1
-            assert topScorer;
-          }
-        }
-        return scorer;
-      }
-
-      @Override
       public float getValueForNormalization() {
         throw new IllegalStateException("Weight already normalized.");
       }
 
-      @Override
-      public boolean scoresDocsOutOfOrder() {
-        // TODO: if this returns false, we should wrap
-        // Scorer with AssertingScorer that confirms docIDs
-        // are in order?
-        return w.scoresDocsOutOfOrder();
-      }
     };
   }
-  
+
+  @Override
+  public Query rewrite(Query original) throws IOException {
+    // TODO: use the more sophisticated QueryUtils.check sometimes!
+    QueryUtils.check(original);
+    Query rewritten = super.rewrite(original);
+    QueryUtils.check(rewritten);
+    return rewritten;
+  }
+
   @Override
   protected Query wrapFilter(Query query, Filter filter) {
     if (random.nextBoolean())
       return super.wrapFilter(query, filter);
     return (filter == null) ? query : new FilteredQuery(query, filter, _TestUtil.randomFilterStrategy(random));
   }
+
+  @Override
+  protected void search(List<AtomicReaderContext> leaves, Weight weight, Collector collector) throws IOException {
+    super.search(leaves, AssertingWeight.wrap(random, weight), collector);
+  }
+
+  @Override
+  public String toString() {
+    return "AssertingIndexSearcher(" + super.toString() + ")";
+  }
+
 }
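
For context, a minimal sketch of how these checks get exercised from a test (the reader/query setup here is assumed, not part of this commit): LuceneTestCase.newSearcher may randomly hand back an AssertingIndexSearcher, so ordinary searches pick up the Query checks in rewrite() and the AssertingWeight wrapping in search() above.

    IndexReader reader = DirectoryReader.open(dir);   // dir: some test Directory (assumed)
    IndexSearcher searcher = newSearcher(reader);     // randomly an AssertingIndexSearcher
    TopDocs hits = searcher.search(new TermQuery(new Term("body", "lucene")), 10);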

Modified: lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java (original)
+++ lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/SearchEquivalenceTestBase.java Tue May  7 11:20:55 2013
@@ -59,7 +59,7 @@ public abstract class SearchEquivalenceT
     directory = newDirectory();
     stopword = "" + randomChar();
     CharacterRunAutomaton stopset = new CharacterRunAutomaton(BasicAutomata.makeString(stopword));
-    analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false, stopset, true);
+    analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false, stopset);
     RandomIndexWriter iw = new RandomIndexWriter(random, directory, analyzer);
     Document doc = new Document();
     Field id = new StringField("id", "", Field.Store.NO);

Modified: lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java (original)
+++ lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java Tue May  7 11:20:55 2013
@@ -448,7 +448,7 @@ public abstract class ShardSearchingTest
       currentNodeVersions = new long[numNodes];
     }
 
-    public void initSearcher(long[] nodeVersions) {
+    public void initSearcher(long[] nodeVersions) throws IOException {
       assert currentShardSearcher == null;
       System.arraycopy(nodeVersions, 0, currentNodeVersions, 0, currentNodeVersions.length);
       currentShardSearcher = new ShardIndexSearcher(currentNodeVersions.clone(),

Modified: lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java (original)
+++ lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryWrapper.java Tue May  7 11:20:55 2013
@@ -20,6 +20,7 @@ package org.apache.lucene.store;
 import java.io.IOException;
 import java.util.Collection;
 
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.util._TestUtil;
 
 /**
@@ -42,7 +43,7 @@ public class BaseDirectoryWrapper extend
   @Override
   public void close() throws IOException {
     isOpen = false;
-    if (checkIndexOnClose && indexPossiblyExists()) {
+    if (checkIndexOnClose && DirectoryReader.indexExists(this)) {
       _TestUtil.checkIndex(this, crossCheckTermVectorsOnClose);
     }
     delegate.close();
@@ -52,27 +53,6 @@ public class BaseDirectoryWrapper extend
     return isOpen;
   }
   
-  /** 
-   * don't rely upon DirectoryReader.fileExists to determine if we should
-   * checkIndex() or not. It might mask real problems, where we silently
-   * don't checkindex at all. instead we look for a segments file.
-   */
-  protected boolean indexPossiblyExists() {
-    String files[];
-    try {
-      files = listAll();
-    } catch (IOException ex) {
-      // this means directory doesn't exist, which is ok. return false
-      return false;
-    }
-    for (String f : files) {
-      if (f.startsWith("segments_")) {
-        return true;
-      }
-    }
-    return false;
-  }
-  
   /**
    * Set whether or not checkindex should be run
    * on close
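
A minimal sketch of the replacement call (directory setup assumed): DirectoryReader.indexExists does the segments probing that the removed indexPossiblyExists() helper used to hand-roll.

    Directory dir = newDirectory();
    // ... write an index into dir ...
    if (DirectoryReader.indexExists(dir)) {   // replaces the "segments_" scan above
      _TestUtil.checkIndex(dir);
    }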

Modified: lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java (original)
+++ lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java Tue May  7 11:20:55 2013
@@ -67,6 +67,7 @@ public class MockDirectoryWrapper extend
   // Max actual bytes used. This is set by MockRAMOutputStream:
   long maxUsedSize;
   double randomIOExceptionRate;
+  double randomIOExceptionRateOnOpen;
   Random randomState;
   boolean noDeleteOpenFile = true;
   boolean preventDoubleWrite = true;
@@ -322,23 +323,50 @@ public class MockDirectoryWrapper extend
   public void setRandomIOExceptionRate(double rate) {
     randomIOExceptionRate = rate;
   }
+  
   public double getRandomIOExceptionRate() {
     return randomIOExceptionRate;
   }
 
+  /**
+   * If 0.0, no exceptions will be thrown during openInput
+   * and createOutput.  Otherwise this should be a double
+   * between 0.0 and 1.0, and an IOException will be thrown
+   * randomly from openInput and createOutput with this
+   * probability.
+   */
+  public void setRandomIOExceptionRateOnOpen(double rate) {
+    randomIOExceptionRateOnOpen = rate;
+  }
+  
+  public double getRandomIOExceptionRateOnOpen() {
+    return randomIOExceptionRateOnOpen;
+  }
+
   void maybeThrowIOException() throws IOException {
     maybeThrowIOException(null);
   }
 
   void maybeThrowIOException(String message) throws IOException {
-    if (randomIOExceptionRate > 0.0) {
-      int number = Math.abs(randomState.nextInt() % 1000);
-      if (number < randomIOExceptionRate*1000) {
-        if (LuceneTestCase.VERBOSE) {
-          System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception" + (message == null ? "" : " (" + message + ")"));
-          new Throwable().printStackTrace(System.out);
-        }
-        throw new IOException("a random IOException" + (message == null ? "" : "(" + message + ")"));
+    if (randomState.nextDouble() < randomIOExceptionRate) {
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception" + (message == null ? "" : " (" + message + ")"));
+        new Throwable().printStackTrace(System.out);
+      }
+      throw new IOException("a random IOException" + (message == null ? "" : "(" + message + ")"));
+    }
+  }
+
+  void maybeThrowIOExceptionOnOpen() throws IOException {
+    if (randomState.nextDouble() < randomIOExceptionRateOnOpen) {
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception during open");
+        new Throwable().printStackTrace(System.out);
+      }
+      if (randomState.nextBoolean()) {
+        throw new IOException("a random IOException");
+      } else {
+        throw new FileNotFoundException("a random IOException");
       }
     }
   }
@@ -403,22 +431,28 @@ public class MockDirectoryWrapper extend
   
   @Override
   public synchronized IndexOutput createOutput(String name, IOContext context) throws IOException {
+    maybeThrowDeterministicException();
+    maybeThrowIOExceptionOnOpen();
     maybeYield();
     if (failOnCreateOutput) {
       maybeThrowDeterministicException();
     }
-    if (crashed)
+    if (crashed) {
       throw new IOException("cannot createOutput after crash");
+    }
     init();
     synchronized(this) {
-      if (preventDoubleWrite && createdFiles.contains(name) && !name.equals("segments.gen"))
+      if (preventDoubleWrite && createdFiles.contains(name) && !name.equals("segments.gen")) {
         throw new IOException("file \"" + name + "\" was already written to");
+      }
     }
-    if (noDeleteOpenFile && openFiles.containsKey(name))
+    if (noDeleteOpenFile && openFiles.containsKey(name)) {
       throw new IOException("MockDirectoryWrapper: file \"" + name + "\" is still open: cannot overwrite");
+    }
     
-    if (crashed)
+    if (crashed) {
       throw new IOException("cannot createOutput after crash");
+    }
     unSyncedFiles.add(name);
     createdFiles.add(name);
     
@@ -428,9 +462,9 @@ public class MockDirectoryWrapper extend
       RAMFile existing = ramdir.fileMap.get(name);
     
       // Enforce write once:
-      if (existing!=null && !name.equals("segments.gen") && preventDoubleWrite)
+      if (existing!=null && !name.equals("segments.gen") && preventDoubleWrite) {
         throw new IOException("file " + name + " already exists");
-      else {
+      } else {
         if (existing!=null) {
           ramdir.sizeInBytes.getAndAdd(-existing.sizeInBytes);
           existing.directory = null;
@@ -484,6 +518,8 @@ public class MockDirectoryWrapper extend
 
   @Override
   public synchronized IndexInput openInput(String name, IOContext context) throws IOException {
+    maybeThrowDeterministicException();
+    maybeThrowIOExceptionOnOpen();
     maybeYield();
     if (failOnOpenInput) {
       maybeThrowDeterministicException();
@@ -587,9 +623,12 @@ public class MockDirectoryWrapper extend
     if (noDeleteOpenFile && openLocks.size() > 0) {
       throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open locks: " + openLocks);
     }
+
     isOpen = false;
     if (getCheckIndexOnClose()) {
-      if (indexPossiblyExists()) {
+      randomIOExceptionRate = 0.0;
+      randomIOExceptionRateOnOpen = 0.0;
+      if (DirectoryReader.indexExists(this)) {
         if (LuceneTestCase.VERBOSE) {
           System.out.println("\nNOTE: MockDirectoryWrapper: now crash");
         }
@@ -793,7 +832,7 @@ public class MockDirectoryWrapper extend
       }
     }
   }
-
+  
   @Override
   public synchronized String[] listAll() throws IOException {
     maybeYield();
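
A minimal sketch of driving the new on-open fault injection from a test (scaffolding assumed; note that close() above zeroes both rates before its checkIndex pass):

    MockDirectoryWrapper dir = newMockDirectory();
    dir.setRandomIOExceptionRate(0.1);        // faults inside existing read/write paths
    dir.setRandomIOExceptionRateOnOpen(0.05); // new: faults in openInput/createOutput
    try {
      // index and search as usual; expect occasional IOException or
      // FileNotFoundException out of openInput/createOutput
    } finally {
      dir.setRandomIOExceptionRateOnOpen(0.0); // stop injecting before close
      dir.close();
    }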

Modified: lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexOutputWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexOutputWrapper.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexOutputWrapper.java (original)
+++ lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/store/MockIndexOutputWrapper.java Tue May  7 11:20:55 2013
@@ -43,6 +43,50 @@ public class MockIndexOutputWrapper exte
     this.delegate = delegate;
   }
 
+  private void checkCrashed() throws IOException {
+    // If MockRAMDir crashed since we were opened, then don't write anything
+    if (dir.crashed) {
+      throw new IOException("MockRAMDirectory was crashed; cannot write to " + name);
+    }
+  }
+  
+  private void checkDiskFull(byte[] b, int offset, DataInput in, long len) throws IOException {
+    long freeSpace = dir.maxSize == 0 ? 0 : dir.maxSize - dir.sizeInBytes();
+    long realUsage = 0;
+
+    // Enforce disk full:
+    if (dir.maxSize != 0 && freeSpace < len) {
+      // Compute the real disk free.  This will greatly slow
+      // down our test but makes it more accurate:
+      realUsage = dir.getRecomputedActualSizeInBytes();
+      freeSpace = dir.maxSize - realUsage;
+    }
+
+    if (dir.maxSize != 0 && freeSpace < len) {
+      if (freeSpace > 0) {
+        realUsage += freeSpace;
+        if (b != null) {
+          delegate.writeBytes(b, offset, (int) freeSpace);
+        } else {
+          delegate.copyBytes(in, len);
+        }
+      }
+      if (realUsage > dir.maxUsedSize) {
+        dir.maxUsedSize = realUsage;
+      }
+      String message = "fake disk full at " + dir.getRecomputedActualSizeInBytes() + " bytes when writing " + name + " (file length=" + delegate.length();
+      if (freeSpace > 0) {
+        message += "; wrote " + freeSpace + " of " + len + " bytes";
+      }
+      message += ")";
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": MDW: now throw fake disk full");
+        new Throwable().printStackTrace(System.out);
+      }
+      throw new IOException(message);
+    }
+  }
+  
   @Override
   public void close() throws IOException {
     try {
@@ -75,48 +119,16 @@ public class MockIndexOutputWrapper exte
   
   @Override
   public void writeBytes(byte[] b, int offset, int len) throws IOException {
-    long freeSpace = dir.maxSize == 0 ? 0 : dir.maxSize - dir.sizeInBytes();
-    long realUsage = 0;
-    // If MockRAMDir crashed since we were opened, then
-    // don't write anything:
-    if (dir.crashed)
-      throw new IOException("MockRAMDirectory was crashed; cannot write to " + name);
-
-    // Enforce disk full:
-    if (dir.maxSize != 0 && freeSpace <= len) {
-      // Compute the real disk free.  This will greatly slow
-      // down our test but makes it more accurate:
-      realUsage = dir.getRecomputedActualSizeInBytes();
-      freeSpace = dir.maxSize - realUsage;
-    }
-
-    if (dir.maxSize != 0 && freeSpace <= len) {
-      if (freeSpace > 0) {
-        realUsage += freeSpace;
-        delegate.writeBytes(b, offset, (int) freeSpace);
-      }
-      if (realUsage > dir.maxUsedSize) {
-        dir.maxUsedSize = realUsage;
-      }
-      String message = "fake disk full at " + dir.getRecomputedActualSizeInBytes() + " bytes when writing " + name + " (file length=" + delegate.length();
-      if (freeSpace > 0) {
-        message += "; wrote " + freeSpace + " of " + len + " bytes";
-      }
-      message += ")";
-      if (LuceneTestCase.VERBOSE) {
-        System.out.println(Thread.currentThread().getName() + ": MDW: now throw fake disk full");
-        new Throwable().printStackTrace(System.out);
-      }
-      throw new IOException(message);
+    checkCrashed();
+    checkDiskFull(b, offset, null, len);
+    
+    if (dir.randomState.nextInt(200) == 0) {
+      final int half = len/2;
+      delegate.writeBytes(b, offset, half);
+      Thread.yield();
+      delegate.writeBytes(b, offset+half, len-half);
     } else {
-      if (dir.randomState.nextInt(200) == 0) {
-        final int half = len/2;
-        delegate.writeBytes(b, offset, half);
-        Thread.yield();
-        delegate.writeBytes(b, offset+half, len-half);
-      } else {
-        delegate.writeBytes(b, offset, len);
-      }
+      delegate.writeBytes(b, offset, len);
     }
 
     dir.maybeThrowDeterministicException();
@@ -146,8 +158,10 @@ public class MockIndexOutputWrapper exte
 
   @Override
   public void copyBytes(DataInput input, long numBytes) throws IOException {
+    checkCrashed();
+    checkDiskFull(null, 0, input, numBytes);
+    
     delegate.copyBytes(input, numBytes);
-    // TODO: we may need to check disk full here as well
     dir.maybeThrowDeterministicException();
   }
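
A minimal sketch of what the refactoring buys (sizes illustrative): the disk-full enforcement that writeBytes always had now also covers copyBytes via the shared checkDiskFull.

    MockDirectoryWrapper dir = newMockDirectory();
    dir.setMaxSizeInBytes(1024); // fake a tiny disk
    IndexOutput out = dir.createOutput("big.bin", IOContext.DEFAULT);
    // writeBytes or copyBytes past the limit now throws
    // IOException("fake disk full at ..."), for both entry points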
 

Modified: lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java Tue May  7 11:20:55 2013
@@ -34,6 +34,7 @@ import org.apache.lucene.document.String
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.IndexReader.ReaderClosedListener;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.FieldCache.CacheEntry;
 import org.apache.lucene.search.QueryUtils.FCInvisibleMultiReader;
@@ -41,6 +42,9 @@ import org.apache.lucene.store.*;
 import org.apache.lucene.store.IOContext.Context;
 import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
 import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
+import org.apache.lucene.util.automaton.AutomatonTestUtil;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
+import org.apache.lucene.util.automaton.RegExp;
 import org.junit.*;
 import org.junit.rules.RuleChain;
 import org.junit.rules.TestRule;
@@ -621,7 +625,7 @@ public abstract class LuceneTestCase ext
    * is active and {@link #RANDOM_MULTIPLIER}.
    */
   public static boolean rarely(Random random) {
-    int p = TEST_NIGHTLY ? 10 : 5;
+    int p = TEST_NIGHTLY ? 10 : 1;
     p += (p * Math.log(RANDOM_MULTIPLIER));
     int min = 100 - Math.min(p, 50); // never more than 50
     return random.nextInt(100) >= min;
@@ -700,6 +704,16 @@ public abstract class LuceneTestCase ext
   public static IndexWriterConfig newIndexWriterConfig(Random r, Version v, Analyzer a) {
     IndexWriterConfig c = new IndexWriterConfig(v, a);
     c.setSimilarity(classEnvRule.similarity);
+    if (VERBOSE) {
+      // Even though TestRuleSetupAndRestoreClassEnv calls
+      // InfoStream.setDefault, we do it again here so that
+      // the PrintStreamInfoStream.messageID increments so
+      // that when there are separate instances of
+      // IndexWriter created we see "IW 0", "IW 1", "IW 2",
+      // ... instead of just always "IW 0":
+      c.setInfoStream(new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream(System.out));
+    }
+
     if (r.nextBoolean()) {
       c.setMergeScheduler(new SerialMergeScheduler());
     }
@@ -770,6 +784,9 @@ public abstract class LuceneTestCase ext
     } else {
       c.setMergePolicy(newLogMergePolicy());
     }
+    if (rarely(r)) {
+      c.setMergedSegmentWarmer(new SimpleMergedSegmentWarmer(c.getInfoStream()));
+    }
     c.setReaderPooling(r.nextBoolean());
     c.setReaderTermsIndexDivisor(_TestUtil.nextInt(r, 1, 4));
     return c;
@@ -1233,7 +1250,7 @@ public abstract class LuceneTestCase ext
    * Create a new searcher over the reader. This searcher might randomly use
    * threads.
    */
-  public static IndexSearcher newSearcher(IndexReader r) throws IOException {
+  public static IndexSearcher newSearcher(IndexReader r) {
     return newSearcher(r, true);
   }
   
@@ -1242,16 +1259,26 @@ public abstract class LuceneTestCase ext
    * threads. if <code>maybeWrap</code> is true, this searcher might wrap the
    * reader with one that returns null for getSequentialSubReaders.
    */
-  public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
+  public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) {
     Random random = random();
     if (usually()) {
       if (maybeWrap) {
-        r = maybeWrapReader(r);
+        try {
+          r = maybeWrapReader(r);
+        } catch (IOException e) {
+          throw new AssertionError(e);
+        }
       }
-      if (rarely() && r instanceof AtomicReader) {
+      // TODO: this whole check is a coverage hack, we should move it to tests for various filterreaders.
+      // ultimately whatever you do will be checkIndex'd at the end anyway. 
+      if (random.nextInt(500) == 0 && r instanceof AtomicReader) {
         // TODO: not useful to check DirectoryReader (redundant with checkindex)
         // but maybe sometimes run this on the other crazy readers maybeWrapReader creates?
-        _TestUtil.checkReader(r);
+        try {
+          _TestUtil.checkReader(r);
+        } catch (IOException e) {
+          throw new AssertionError(e);
+        }
       }
       IndexSearcher ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getContext());
       ret.setSimilarity(classEnvRule.similarity);
@@ -1309,4 +1336,648 @@ public abstract class LuceneTestCase ext
     }
     return true;
   }
+
+  public void assertReaderEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    assertReaderStatisticsEquals(info, leftReader, rightReader);
+    assertFieldsEquals(info, leftReader, MultiFields.getFields(leftReader), MultiFields.getFields(rightReader), true);
+    assertNormsEquals(info, leftReader, rightReader);
+    assertStoredFieldsEquals(info, leftReader, rightReader);
+    assertTermVectorsEquals(info, leftReader, rightReader);
+    assertDocValuesEquals(info, leftReader, rightReader);
+    assertDeletedDocsEquals(info, leftReader, rightReader);
+    assertFieldInfosEquals(info, leftReader, rightReader);
+  }
+
+  /** 
+   * checks that reader-level statistics are the same 
+   */
+  public void assertReaderStatisticsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    // Somewhat redundant: we never delete docs
+    assertEquals(info, leftReader.maxDoc(), rightReader.maxDoc());
+    assertEquals(info, leftReader.numDocs(), rightReader.numDocs());
+    assertEquals(info, leftReader.numDeletedDocs(), rightReader.numDeletedDocs());
+    assertEquals(info, leftReader.hasDeletions(), rightReader.hasDeletions());
+  }
+
+  /** 
+   * Fields api equivalency 
+   */
+  public void assertFieldsEquals(String info, IndexReader leftReader, Fields leftFields, Fields rightFields, boolean deep) throws IOException {
+    // Fields could be null if there are no postings,
+    // but then it must be null for both
+    if (leftFields == null || rightFields == null) {
+      assertNull(info, leftFields);
+      assertNull(info, rightFields);
+      return;
+    }
+    assertFieldStatisticsEquals(info, leftFields, rightFields);
+    
+    Iterator<String> leftEnum = leftFields.iterator();
+    Iterator<String> rightEnum = rightFields.iterator();
+    
+    while (leftEnum.hasNext()) {
+      String field = leftEnum.next();
+      assertEquals(info, field, rightEnum.next());
+      assertTermsEquals(info, leftReader, leftFields.terms(field), rightFields.terms(field), deep);
+    }
+    assertFalse(rightEnum.hasNext());
+  }
+
+  /** 
+   * checks that top-level statistics on Fields are the same 
+   */
+  public void assertFieldStatisticsEquals(String info, Fields leftFields, Fields rightFields) throws IOException {
+    if (leftFields.size() != -1 && rightFields.size() != -1) {
+      assertEquals(info, leftFields.size(), rightFields.size());
+    }
+  }
+
+  /** 
+   * Terms api equivalency 
+   */
+  public void assertTermsEquals(String info, IndexReader leftReader, Terms leftTerms, Terms rightTerms, boolean deep) throws IOException {
+    if (leftTerms == null || rightTerms == null) {
+      assertNull(info, leftTerms);
+      assertNull(info, rightTerms);
+      return;
+    }
+    assertTermsStatisticsEquals(info, leftTerms, rightTerms);
+    assertEquals(leftTerms.hasOffsets(), rightTerms.hasOffsets());
+    assertEquals(leftTerms.hasPositions(), rightTerms.hasPositions());
+    assertEquals(leftTerms.hasPayloads(), rightTerms.hasPayloads());
+
+    TermsEnum leftTermsEnum = leftTerms.iterator(null);
+    TermsEnum rightTermsEnum = rightTerms.iterator(null);
+    assertTermsEnumEquals(info, leftReader, leftTermsEnum, rightTermsEnum, true);
+    
+    assertTermsSeekingEquals(info, leftTerms, rightTerms);
+    
+    if (deep) {
+      int numIntersections = atLeast(3);
+      for (int i = 0; i < numIntersections; i++) {
+        String re = AutomatonTestUtil.randomRegexp(random());
+        CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton());
+        if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
+          // TODO: test start term too
+          TermsEnum leftIntersection = leftTerms.intersect(automaton, null);
+          TermsEnum rightIntersection = rightTerms.intersect(automaton, null);
+          assertTermsEnumEquals(info, leftReader, leftIntersection, rightIntersection, rarely());
+        }
+      }
+    }
+  }
+
+  /** 
+   * checks collection-level statistics on Terms 
+   */
+  public void assertTermsStatisticsEquals(String info, Terms leftTerms, Terms rightTerms) throws IOException {
+    assert leftTerms.getComparator() == rightTerms.getComparator();
+    if (leftTerms.getDocCount() != -1 && rightTerms.getDocCount() != -1) {
+      assertEquals(info, leftTerms.getDocCount(), rightTerms.getDocCount());
+    }
+    if (leftTerms.getSumDocFreq() != -1 && rightTerms.getSumDocFreq() != -1) {
+      assertEquals(info, leftTerms.getSumDocFreq(), rightTerms.getSumDocFreq());
+    }
+    if (leftTerms.getSumTotalTermFreq() != -1 && rightTerms.getSumTotalTermFreq() != -1) {
+      assertEquals(info, leftTerms.getSumTotalTermFreq(), rightTerms.getSumTotalTermFreq());
+    }
+    if (leftTerms.size() != -1 && rightTerms.size() != -1) {
+      assertEquals(info, leftTerms.size(), rightTerms.size());
+    }
+  }
+
+  private static class RandomBits implements Bits {
+    FixedBitSet bits;
+    
+    RandomBits(int maxDoc, double pctLive, Random random) {
+      bits = new FixedBitSet(maxDoc);
+      for (int i = 0; i < maxDoc; i++) {
+        if (random.nextDouble() <= pctLive) {        
+          bits.set(i);
+        }
+      }
+    }
+    
+    @Override
+    public boolean get(int index) {
+      return bits.get(index);
+    }
+
+    @Override
+    public int length() {
+      return bits.length();
+    }
+  }
+
+  /** 
+   * checks the terms enum sequentially;
+   * if deep is false, it does a 'shallow' test that doesn't go down to the docsenums
+   */
+  public void assertTermsEnumEquals(String info, IndexReader leftReader, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws IOException {
+    BytesRef term;
+    Bits randomBits = new RandomBits(leftReader.maxDoc(), random().nextDouble(), random());
+    DocsAndPositionsEnum leftPositions = null;
+    DocsAndPositionsEnum rightPositions = null;
+    DocsEnum leftDocs = null;
+    DocsEnum rightDocs = null;
+    
+    while ((term = leftTermsEnum.next()) != null) {
+      assertEquals(info, term, rightTermsEnum.next());
+      assertTermStatsEquals(info, leftTermsEnum, rightTermsEnum);
+      if (deep) {
+        assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
+                                   rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+        assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
+                                   rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+
+        assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+                                leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
+                                rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+        assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+                                leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
+                                rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+
+        // with freqs:
+        assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs),
+            rightDocs = rightTermsEnum.docs(null, rightDocs),
+            true);
+        assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
+            rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
+            true);
+
+        // w/o freqs:
+        assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
+            rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
+            false);
+        assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
+            rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
+            false);
+        
+        // with freqs:
+        assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+            leftDocs = leftTermsEnum.docs(null, leftDocs),
+            rightDocs = rightTermsEnum.docs(null, rightDocs),
+            true);
+        assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+            leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
+            rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
+            true);
+
+        // w/o freqs:
+        assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+            leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
+            rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
+            false);
+        assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+            leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
+            rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
+            false);
+      }
+    }
+    assertNull(info, rightTermsEnum.next());
+  }
+
+
+  /**
+   * checks docs + freqs + positions + payloads, sequentially
+   */
+  public void assertDocsAndPositionsEnumEquals(String info, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+    if (leftDocs == null || rightDocs == null) {
+      assertNull(leftDocs);
+      assertNull(rightDocs);
+      return;
+    }
+    assertEquals(info, -1, leftDocs.docID());
+    assertEquals(info, -1, rightDocs.docID());
+    int docid;
+    while ((docid = leftDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+      assertEquals(info, docid, rightDocs.nextDoc());
+      int freq = leftDocs.freq();
+      assertEquals(info, freq, rightDocs.freq());
+      for (int i = 0; i < freq; i++) {
+        assertEquals(info, leftDocs.nextPosition(), rightDocs.nextPosition());
+        assertEquals(info, leftDocs.getPayload(), rightDocs.getPayload());
+        assertEquals(info, leftDocs.startOffset(), rightDocs.startOffset());
+        assertEquals(info, leftDocs.endOffset(), rightDocs.endOffset());
+      }
+    }
+    assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc());
+  }
+  
+  /**
+   * checks docs + freqs, sequentially
+   */
+  public void assertDocsEnumEquals(String info, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws IOException {
+    if (leftDocs == null) {
+      assertNull(rightDocs);
+      return;
+    }
+    assertEquals(info, -1, leftDocs.docID());
+    assertEquals(info, -1, rightDocs.docID());
+    int docid;
+    while ((docid = leftDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+      assertEquals(info, docid, rightDocs.nextDoc());
+      if (hasFreqs) {
+        assertEquals(info, leftDocs.freq(), rightDocs.freq());
+      }
+    }
+    assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc());
+  }
+  
+  /**
+   * checks advancing docs
+   */
+  public void assertDocsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws IOException {
+    if (leftDocs == null) {
+      assertNull(rightDocs);
+      return;
+    }
+    int docid = -1;
+    int averageGap = leftReader.maxDoc() / (1+docFreq);
+    int skipInterval = 16;
+
+    while (true) {
+      if (random().nextBoolean()) {
+        // nextDoc()
+        docid = leftDocs.nextDoc();
+        assertEquals(info, docid, rightDocs.nextDoc());
+      } else {
+        // advance()
+        int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap));
+        docid = leftDocs.advance(skip);
+        assertEquals(info, docid, rightDocs.advance(skip));
+      }
+      
+      if (docid == DocIdSetIterator.NO_MORE_DOCS) {
+        return;
+      }
+      if (hasFreqs) {
+        assertEquals(info, leftDocs.freq(), rightDocs.freq());
+      }
+    }
+  }
+  
+  /**
+   * checks advancing docs + positions
+   */
+  public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+    if (leftDocs == null || rightDocs == null) {
+      assertNull(leftDocs);
+      assertNull(rightDocs);
+      return;
+    }
+    
+    int docid = -1;
+    int averageGap = leftReader.maxDoc() / (1+docFreq);
+    int skipInterval = 16;
+
+    while (true) {
+      if (random().nextBoolean()) {
+        // nextDoc()
+        docid = leftDocs.nextDoc();
+        assertEquals(info, docid, rightDocs.nextDoc());
+      } else {
+        // advance()
+        int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap));
+        docid = leftDocs.advance(skip);
+        assertEquals(info, docid, rightDocs.advance(skip));
+      }
+      
+      if (docid == DocIdSetIterator.NO_MORE_DOCS) {
+        return;
+      }
+      int freq = leftDocs.freq();
+      assertEquals(info, freq, rightDocs.freq());
+      for (int i = 0; i < freq; i++) {
+        assertEquals(info, leftDocs.nextPosition(), rightDocs.nextPosition());
+        assertEquals(info, leftDocs.getPayload(), rightDocs.getPayload());
+      }
+    }
+  }
+
+  
+  private void assertTermsSeekingEquals(String info, Terms leftTerms, Terms rightTerms) throws IOException {
+    TermsEnum leftEnum = null;
+    TermsEnum rightEnum = null;
+
+    // just an upper bound
+    int numTests = atLeast(20);
+    Random random = random();
+
+    // collect this number of terms from the left side
+    HashSet<BytesRef> tests = new HashSet<BytesRef>();
+    int numPasses = 0;
+    while (numPasses < 10 && tests.size() < numTests) {
+      leftEnum = leftTerms.iterator(leftEnum);
+      BytesRef term = null;
+      while ((term = leftEnum.next()) != null) {
+        int code = random.nextInt(10);
+        if (code == 0) {
+          // the term
+          tests.add(BytesRef.deepCopyOf(term));
+        } else if (code == 1) {
+          // truncated subsequence of term
+          term = BytesRef.deepCopyOf(term);
+          if (term.length > 0) {
+            // truncate it
+            term.length = random.nextInt(term.length);
+          }
+        } else if (code == 2) {
+          // term, but ensure a non-zero offset
+          byte newbytes[] = new byte[term.length+5];
+          System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length);
+          tests.add(new BytesRef(newbytes, 5, term.length));
+        } else if (code == 3) {
+          switch (random().nextInt(3)) {
+            case 0:
+              tests.add(new BytesRef()); // before the first term
+              break;
+            case 1:
+              tests.add(new BytesRef(new byte[] {(byte) 0xFF, (byte) 0xFF})); // past the last term
+              break;
+            case 2:
+              tests.add(new BytesRef(_TestUtil.randomSimpleString(random()))); // random term
+              break;
+            default:
+              throw new AssertionError();
+          }
+        }
+      }
+      numPasses++;
+    }
+
+    rightEnum = rightTerms.iterator(rightEnum);
+
+    ArrayList<BytesRef> shuffledTests = new ArrayList<BytesRef>(tests);
+    Collections.shuffle(shuffledTests, random);
+
+    for (BytesRef b : shuffledTests) {
+      if (rarely()) {
+        // reuse the enums
+        leftEnum = leftTerms.iterator(leftEnum);
+        rightEnum = rightTerms.iterator(rightEnum);
+      }
+
+      final boolean useCache = random().nextBoolean();
+      final boolean seekExact = random().nextBoolean();
+
+      if (seekExact) {
+        assertEquals(info, leftEnum.seekExact(b, useCache), rightEnum.seekExact(b, useCache));
+      } else {
+        SeekStatus leftStatus = leftEnum.seekCeil(b, useCache);
+        SeekStatus rightStatus = rightEnum.seekCeil(b, useCache);
+        assertEquals(info, leftStatus, rightStatus);
+        if (leftStatus != SeekStatus.END) {
+          assertEquals(info, leftEnum.term(), rightEnum.term());
+          assertTermStatsEquals(info, leftEnum, rightEnum);
+        }
+      }
+    }
+  }
+  
+  /**
+   * checks term-level statistics
+   */
+  public void assertTermStatsEquals(String info, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum) throws IOException {
+    assertEquals(info, leftTermsEnum.docFreq(), rightTermsEnum.docFreq());
+    if (leftTermsEnum.totalTermFreq() != -1 && rightTermsEnum.totalTermFreq() != -1) {
+      assertEquals(info, leftTermsEnum.totalTermFreq(), rightTermsEnum.totalTermFreq());
+    }
+  }
+  
+  /** 
+   * checks that norms are the same across all fields 
+   */
+  public void assertNormsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    Fields leftFields = MultiFields.getFields(leftReader);
+    Fields rightFields = MultiFields.getFields(rightReader);
+    // Fields could be null if there are no postings,
+    // but then it must be null for both
+    if (leftFields == null || rightFields == null) {
+      assertNull(info, leftFields);
+      assertNull(info, rightFields);
+      return;
+    }
+    
+    for (String field : leftFields) {
+      NumericDocValues leftNorms = MultiDocValues.getNormValues(leftReader, field);
+      NumericDocValues rightNorms = MultiDocValues.getNormValues(rightReader, field);
+      if (leftNorms != null && rightNorms != null) {
+        assertDocValuesEquals(info, leftReader.maxDoc(), leftNorms, rightNorms);
+      } else {
+        assertNull(info, leftNorms);
+        assertNull(info, rightNorms);
+      }
+    }
+  }
+  
+  /** 
+   * checks that stored fields of all documents are the same 
+   */
+  public void assertStoredFieldsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    assert leftReader.maxDoc() == rightReader.maxDoc();
+    for (int i = 0; i < leftReader.maxDoc(); i++) {
+      StoredDocument leftDoc = leftReader.document(i);
+      StoredDocument rightDoc = rightReader.document(i);
+      
+      // TODO: I think this is bogus because we don't document what the order should be
+      // from these iterators, etc. I think the codec/IndexReader should be free to order this stuff
+      // in whatever way it wants (e.g. maybe it packs related fields together or something)
+      // To fix this, we sort the fields in both documents by name, but
+      // we still assume that all instances with same name are in order:
+      Comparator<StorableField> comp = new Comparator<StorableField>() {
+        @Override
+        public int compare(StorableField arg0, StorableField arg1) {
+          return arg0.name().compareTo(arg1.name());
+        }        
+      };
+      Collections.sort(leftDoc.getFields(), comp);
+      Collections.sort(rightDoc.getFields(), comp);
+
+      Iterator<StorableField> leftIterator = leftDoc.iterator();
+      Iterator<StorableField> rightIterator = rightDoc.iterator();
+      while (leftIterator.hasNext()) {
+        assertTrue(info, rightIterator.hasNext());
+        assertStoredFieldEquals(info, leftIterator.next(), rightIterator.next());
+      }
+      assertFalse(info, rightIterator.hasNext());
+    }
+  }
+  
+  /** 
+   * checks that two stored fields are equivalent 
+   */
+  public void assertStoredFieldEquals(String info, StorableField leftField, StorableField rightField) {
+    assertEquals(info, leftField.name(), rightField.name());
+    assertEquals(info, leftField.binaryValue(), rightField.binaryValue());
+    assertEquals(info, leftField.stringValue(), rightField.stringValue());
+    assertEquals(info, leftField.numericValue(), rightField.numericValue());
+    // TODO: should we check the FT at all?
+  }
+  
+  /** 
+   * checks that term vectors across all fields are equivalent 
+   */
+  public void assertTermVectorsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    assert leftReader.maxDoc() == rightReader.maxDoc();
+    for (int i = 0; i < leftReader.maxDoc(); i++) {
+      Fields leftFields = leftReader.getTermVectors(i);
+      Fields rightFields = rightReader.getTermVectors(i);
+      assertFieldsEquals(info, leftReader, leftFields, rightFields, rarely());
+    }
+  }
+
+  private static Set<String> getDVFields(IndexReader reader) {
+    Set<String> fields = new HashSet<String>();
+    for(FieldInfo fi : MultiFields.getMergedFieldInfos(reader)) {
+      if (fi.hasDocValues()) {
+        fields.add(fi.name);
+      }
+    }
+
+    return fields;
+  }
+  
+  /**
+   * checks that docvalues across all fields are equivalent
+   */
+  public void assertDocValuesEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    Set<String> leftFields = getDVFields(leftReader);
+    Set<String> rightFields = getDVFields(rightReader);
+    assertEquals(info, leftFields, rightFields);
+
+    for (String field : leftFields) {
+      // TODO: clean this up... very messy
+      {
+        NumericDocValues leftValues = MultiDocValues.getNumericValues(leftReader, field);
+        NumericDocValues rightValues = MultiDocValues.getNumericValues(rightReader, field);
+        if (leftValues != null && rightValues != null) {
+          assertDocValuesEquals(info, leftReader.maxDoc(), leftValues, rightValues);
+        } else {
+          assertNull(info, leftValues);
+          assertNull(info, rightValues);
+        }
+      }
+
+      {
+        BinaryDocValues leftValues = MultiDocValues.getBinaryValues(leftReader, field);
+        BinaryDocValues rightValues = MultiDocValues.getBinaryValues(rightReader, field);
+        if (leftValues != null && rightValues != null) {
+          BytesRef scratchLeft = new BytesRef();
+          BytesRef scratchRight = new BytesRef();
+          for(int docID=0;docID<leftReader.maxDoc();docID++) {
+            leftValues.get(docID, scratchLeft);
+            rightValues.get(docID, scratchRight);
+            assertEquals(info, scratchLeft, scratchRight);
+          }
+        } else {
+          assertNull(info, leftValues);
+          assertNull(info, rightValues);
+        }
+      }
+      
+      {
+        SortedDocValues leftValues = MultiDocValues.getSortedValues(leftReader, field);
+        SortedDocValues rightValues = MultiDocValues.getSortedValues(rightReader, field);
+        if (leftValues != null && rightValues != null) {
+          // numOrds
+          assertEquals(info, leftValues.getValueCount(), rightValues.getValueCount());
+          // ords
+          BytesRef scratchLeft = new BytesRef();
+          BytesRef scratchRight = new BytesRef();
+          for (int i = 0; i < leftValues.getValueCount(); i++) {
+            leftValues.lookupOrd(i, scratchLeft);
+            rightValues.lookupOrd(i, scratchRight);
+            assertEquals(info, scratchLeft, scratchRight);
+          }
+          // bytes
+          for(int docID=0;docID<leftReader.maxDoc();docID++) {
+            leftValues.get(docID, scratchLeft);
+            rightValues.get(docID, scratchRight);
+            assertEquals(info, scratchLeft, scratchRight);
+          }
+        } else {
+          assertNull(info, leftValues);
+          assertNull(info, rightValues);
+        }
+      }
+      
+      {
+        SortedSetDocValues leftValues = MultiDocValues.getSortedSetValues(leftReader, field);
+        SortedSetDocValues rightValues = MultiDocValues.getSortedSetValues(rightReader, field);
+        if (leftValues != null && rightValues != null) {
+          // numOrds
+          assertEquals(info, leftValues.getValueCount(), rightValues.getValueCount());
+          // ords
+          BytesRef scratchLeft = new BytesRef();
+          BytesRef scratchRight = new BytesRef();
+          for (int i = 0; i < leftValues.getValueCount(); i++) {
+            leftValues.lookupOrd(i, scratchLeft);
+            rightValues.lookupOrd(i, scratchRight);
+            assertEquals(info, scratchLeft, scratchRight);
+          }
+          // ord lists
+          for(int docID=0;docID<leftReader.maxDoc();docID++) {
+            leftValues.setDocument(docID);
+            rightValues.setDocument(docID);
+            long ord;
+            while ((ord = leftValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
+              assertEquals(info, ord, rightValues.nextOrd());
+            }
+            assertEquals(info, SortedSetDocValues.NO_MORE_ORDS, rightValues.nextOrd());
+          }
+        } else {
+          assertNull(info, leftValues);
+          assertNull(info, rightValues);
+        }
+      }
+    }
+  }
+  
+  public void assertDocValuesEquals(String info, int num, NumericDocValues leftDocValues, NumericDocValues rightDocValues) throws IOException {
+    assertNotNull(info, leftDocValues);
+    assertNotNull(info, rightDocValues);
+    for(int docID=0;docID<num;docID++) {
+      assertEquals(leftDocValues.get(docID),
+                   rightDocValues.get(docID));
+    }
+  }
+  
+  // TODO: this is kinda stupid, we don't delete documents in the test.
+  public void assertDeletedDocsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    assert leftReader.numDeletedDocs() == rightReader.numDeletedDocs();
+    Bits leftBits = MultiFields.getLiveDocs(leftReader);
+    Bits rightBits = MultiFields.getLiveDocs(rightReader);
+    
+    if (leftBits == null || rightBits == null) {
+      assertNull(info, leftBits);
+      assertNull(info, rightBits);
+      return;
+    }
+    
+    assert leftReader.maxDoc() == rightReader.maxDoc();
+    assertEquals(info, leftBits.length(), rightBits.length());
+    for (int i = 0; i < leftReader.maxDoc(); i++) {
+      assertEquals(info, leftBits.get(i), rightBits.get(i));
+    }
+  }
+  
+  public void assertFieldInfosEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    FieldInfos leftInfos = MultiFields.getMergedFieldInfos(leftReader);
+    FieldInfos rightInfos = MultiFields.getMergedFieldInfos(rightReader);
+    
+    // TODO: would be great to verify more than just the names of the fields!
+    TreeSet<String> left = new TreeSet<String>();
+    TreeSet<String> right = new TreeSet<String>();
+    
+    for (FieldInfo fi : leftInfos) {
+      left.add(fi.name);
+    }
+    
+    for (FieldInfo fi : rightInfos) {
+      right.add(fi.name);
+    }
+    
+    assertEquals(info, left, right);
+  }
 }
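
A minimal sketch of using the new equivalence asserts from a test (two directories holding equivalent content are assumed):

    DirectoryReader left = DirectoryReader.open(leftDir);
    DirectoryReader right = DirectoryReader.open(rightDir);
    try {
      // walks fields, terms, postings, norms, stored fields, term vectors,
      // doc values, live docs and field infos, as added above
      assertReaderEquals("left vs right", left, right);
    } finally {
      IOUtils.close(left, right);
    }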

Modified: lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java (original)
+++ lucene/dev/branches/lucene4258/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java Tue May  7 11:20:55 2013
@@ -17,6 +17,7 @@ package org.apache.lucene.util;
  * limitations under the License.
  */
 
+import java.io.PrintStream;
 import java.util.Arrays;
 import java.util.Date;
 import java.util.HashMap;
@@ -33,21 +34,17 @@ import org.apache.lucene.codecs.Postings
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.cheapbastard.CheapBastardCodec;
 import org.apache.lucene.codecs.compressing.CompressingCodec;
-import org.apache.lucene.codecs.lucene40.Lucene40Codec;
 import org.apache.lucene.codecs.lucene40.Lucene40RWCodec;
 import org.apache.lucene.codecs.lucene40.Lucene40RWPostingsFormat;
-import org.apache.lucene.codecs.lucene41.Lucene41Codec;
 import org.apache.lucene.codecs.lucene41.Lucene41RWCodec;
 import org.apache.lucene.codecs.lucene42.Lucene42Codec;
-import org.apache.lucene.codecs.mockrandom.MockRandomPostingsFormat;
 import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
 import org.apache.lucene.index.RandomCodec;
 import org.apache.lucene.search.RandomSimilarityProvider;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;  // javadocs
 import org.junit.internal.AssumptionViolatedException;
-
 import com.carrotsearch.randomizedtesting.RandomizedContext;
 
 import static org.apache.lucene.util.LuceneTestCase.*;
@@ -79,6 +76,25 @@ final class TestRuleSetupAndRestoreClass
    */
   HashSet<String> avoidCodecs;
 
+  static class ThreadNameFixingPrintStreamInfoStream extends PrintStreamInfoStream {
+
+    public ThreadNameFixingPrintStreamInfoStream(PrintStream out) {
+      super(out);
+    }
+
+    @Override
+    public void message(String component, String message) {
+      final String name;
+      if (Thread.currentThread().getName().startsWith("TEST-")) {
+        // The name of the main thread is way too
+        // long when looking at IW verbose output...
+        name = "main";
+      } else {
+        name = Thread.currentThread().getName();
+      }
+      stream.println(component + " " + messageID + " [" + new Date() + "; " + name + "]: " + message);    
+    }
+  }
 
   @Override
   protected void before() throws Exception {
@@ -111,29 +127,14 @@ final class TestRuleSetupAndRestoreClass
     final Random random = RandomizedContext.current().getRandom();
     final boolean v = random.nextBoolean();
     if (INFOSTREAM) {
-      InfoStream.setDefault(new PrintStreamInfoStream(System.out) {
-          @Override
-          public void message(String component, String message) {
-            final String name;
-            if (Thread.currentThread().getName().startsWith("TEST-")) {
-              // The name of the main thread is way too
-              // long when looking at IW verbose output...
-              name = "main";
-            } else {
-              name = Thread.currentThread().getName();
-            }
-            stream.println(component + " " + messageID + " [" + new Date() + "; " + name + "]: " + message);    
-          }
-        });
+      InfoStream.setDefault(new ThreadNameFixingPrintStreamInfoStream(System.out));
     } else if (v) {
       InfoStream.setDefault(new NullInfoStream());
     }
 
     Class<?> targetClass = RandomizedContext.current().getTargetClass();
     avoidCodecs = new HashSet<String>();
-    // TODO: Fix below code to use c.isAnnotationPresent(). It was changed
-    // to the null check to work around a bug in JDK 8 b78 (see LUCENE-4808).
-    if (targetClass.getAnnotation(SuppressCodecs.class) != null) {
+    if (targetClass.isAnnotationPresent(SuppressCodecs.class)) {
       SuppressCodecs a = targetClass.getAnnotation(SuppressCodecs.class);
       avoidCodecs.addAll(Arrays.asList(a.value()));
     }
@@ -191,7 +192,7 @@ final class TestRuleSetupAndRestoreClass
           return super.toString() + ": " + format.toString() + ", " + dvFormat.toString();
         }
       };
-    } else if ("SimpleText".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 9 && !shouldAvoidCodec("SimpleText"))) {
+    } else if ("SimpleText".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 9 && LuceneTestCase.rarely(random) && !shouldAvoidCodec("SimpleText"))) {
       codec = new SimpleTextCodec();
     } else if ("CheapBastard".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 8 && !shouldAvoidCodec("CheapBastard") && !shouldAvoidCodec("Lucene41"))) {
      // we also avoid this codec if Lucene41 is avoided, since that's the postings format it uses.

Modified: lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/executors.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/executors.txt?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/executors.txt (original)
+++ lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/executors.txt Tue May  7 11:20:55 2013
@@ -13,11 +13,11 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-# These methods spawn threads with vague names. Use a custom thread factory and name
-# threads so that you can tell (by its name) which executor it is associated with.
 # see Solr's DefaultSolrThreadFactory
 # see Lucene's NamedThreadFactory
 
+@defaultMessage spawns threads with vague names; use a custom thread factory and name threads so that you can tell (by its name) which executor it is associated with
+
 java.util.concurrent.Executors#newFixedThreadPool(int)
 java.util.concurrent.Executors#newSingleThreadExecutor()
 java.util.concurrent.Executors#newCachedThreadPool()

Modified: lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/servlet-api.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/servlet-api.txt?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/servlet-api.txt (original)
+++ lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/servlet-api.txt Tue May  7 11:20:55 2013
@@ -13,17 +13,19 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-# These methods from the Servlet API should not be used, because they are
-# either broken and slow in some environments (e.g., Jetty's UTF-8 readers),
-# or the parsing of request parameters is not using the correct encoding
-# without extra configuration in the servlet container:
-javax.servlet.ServletRequest#getReader()
+@defaultMessage Servlet API method parses request parameters without using the correct encoding unless extra configuration is given in the servlet container
+
 javax.servlet.ServletRequest#getParameter(java.lang.String) 
 javax.servlet.ServletRequest#getParameterMap() 
 javax.servlet.ServletRequest#getParameterNames() 
 javax.servlet.ServletRequest#getParameterValues(java.lang.String) 
-javax.servlet.ServletResponse#getWriter()
 
+javax.servlet.http.HttpServletRequest#getSession() @ Servlet API getter has side effect of creating sessions
+
+@defaultMessage Servlet API method is broken and slow in some environments (e.g., Jetty's UTF-8 readers)
+
+javax.servlet.ServletRequest#getReader()
+javax.servlet.ServletResponse#getWriter()
 javax.servlet.ServletInputStream#readLine(byte[],int,int) 
 javax.servlet.ServletOutputStream#print(boolean)
 javax.servlet.ServletOutputStream#print(char)

Modified: lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/tests.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/tests.txt?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/tests.txt (original)
+++ lucene/dev/branches/lucene4258/lucene/tools/forbiddenApis/tests.txt Tue May  7 11:20:55 2013
@@ -13,13 +13,10 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-# All classes should derive from LuceneTestCase
-junit.framework.TestCase
+junit.framework.TestCase @ All classes should derive from LuceneTestCase
 
-# Use RandomizedRunner's random instead
-java.util.Random#<init>()
+java.util.Random#<init>() @ Use RandomizedRunner's random instead
 
-# Don't depend on wall clock times
 # TODO: fix tests that do this!
-#java.lang.System#currentTimeMillis()
-#java.lang.System#nanoTime()
+#java.lang.System#currentTimeMillis() @ Don't depend on wall clock times
+#java.lang.System#nanoTime() @ Don't depend on wall clock times

Modified: lucene/dev/branches/lucene4258/lucene/tools/junit4/cached-timehints.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/lucene/tools/junit4/cached-timehints.txt?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/lucene/tools/junit4/cached-timehints.txt (original)
+++ lucene/dev/branches/lucene4258/lucene/tools/junit4/cached-timehints.txt Tue May  7 11:20:55 2013
@@ -929,7 +929,7 @@ org.apache.solr.core.TestQuerySenderList
 org.apache.solr.core.TestQuerySenderNoQuery=392,616,406,406,413,413,356
 org.apache.solr.core.TestSolrDeletionPolicy1=636,736,868,732,627,1449,788
 org.apache.solr.core.TestSolrDeletionPolicy2=377,450,376,388,413,459,373
-org.apache.solr.core.TestSolrDiscoveryProperties=1183,1243,5175,1041,1490,2090,1181
+org.apache.solr.core.TestCoreDiscovery=1183,1243,5175,1041,1490,2090,1181
 org.apache.solr.core.TestSolrIndexConfig=491,411,360,426,407,414,369
 org.apache.solr.core.TestSolrXMLSerializer=29,24,33,39,164,65,46
 org.apache.solr.core.TestXIncludeConfig=158,139,147,115,1494,112,318

Modified: lucene/dev/branches/lucene4258/solr/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/solr/CHANGES.txt?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/solr/CHANGES.txt (original)
+++ lucene/dev/branches/lucene4258/solr/CHANGES.txt Tue May  7 11:20:55 2013
@@ -38,6 +38,84 @@ TBD...
 Detailed Change List
 ----------------------
 
+==================  4.4.0 ==================
+
+Versions of Major Components
+---------------------
+Apache Tika 1.3
+Carrot2 3.6.2
+Velocity 1.7 and Velocity Tools 2.0
+Apache UIMA 2.3.1
+Apache ZooKeeper 3.4.5
+
+Upgrading from Solr 4.3.0
+----------------------
+
+* SOLR-4778: The signature of LogWatcher.registerListener has changed, from
+  (ListenerConfig, CoreContainer) to (ListenerConfig).  Users implementing their
+  own LogWatcher classes will need to change their code accordingly.
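+
+  A minimal sketch of the changed override for a custom subclass (fragment
+  only: the surrounding class and its other abstract methods are omitted):
+
+    // Solr 4.3: public void registerListener(ListenerConfig cfg, CoreContainer cc)
+    // Solr 4.4:
+    @Override
+    public void registerListener(ListenerConfig cfg) {
+      // wire the listener up here; the CoreContainer argument is gone
+    }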
+
+Detailed Change List
+----------------------
+
+New Features
+----------------------
+
+* SOLR-3251: Dynamically add fields to schema. (Steve Rowe, Robert Muir, yonik)   
+
+* SOLR-4761: Add option to plugin a merged segment warmer into solrconfig.xml
+  (Mark Miller, Mike McCandless, Robert Muir)
+
+* SOLR-3240: Add "spellcheck.collateMaxCollectDocs" option so that when testing
+  potential Collations against the index, SpellCheckComponent will only collect
+  n documents, thereby estimating the hit-count.  This is a performance optimization
+  in cases where exact hit-counts are unnecessary.  Also, when "collateExtendedResults"
+  is false, this optimization is always made (James Dyer).
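+
+  A minimal SolrJ sketch of the new option (request handler name and values
+  are illustrative, not prescribed by this change):
+
+    SolrQuery q = new SolrQuery("delll ultrashar");
+    q.setRequestHandler("/spell");
+    q.set("spellcheck", true);
+    q.set("spellcheck.collate", true);
+    // collect at most ~100 docs per collation test; hit-counts are estimates
+    q.set("spellcheck.collateMaxCollectDocs", 100);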
+
+Bug Fixes
+----------------------
+
+* SOLR-4741: Deleting a collection should set DELETE_DATA_DIR to true. 
+  (Mark Miller)
+  
+* SOLR-4333: edismax parser to not double-escape colons if already escaped by 
+  the client application (James Dyer, Robert J. van der Boon)
+
+* SOLR-4752: There are some minor bugs in the Collections API parameter
+  validation. (Mark Miller)
+
+* SOLR-4776: Solrj doesn't return "between" count in range facets
+  (Philip K. Warren via shalin)
+
+* SOLR-4616: HitRatio on caches is now exposed over JMX MBeans as a float.
+  (Greg Bowyer)
+
+Other Changes
+----------------------
+
+* SOLR-4737: Update Guava to 14.0.1 (Mark Miller)
+
+* SOLR-2079: Add option to pass HttpServletRequest in the SolrQueryRequest context map.
+  (Tomás Fernández Löbbe via Robert Muir)
+
+* SOLR-4738: Update Jetty to 8.1.10.v20130312 (Mark Miller, Robert Muir)
+
+* SOLR-4749: Clean up and refactor CoreContainer code around solr.xml and SolrCore
+  management. (Mark Miller)
+
+* SOLR-4547: Move logging of filenames on commit from INFO to DEBUG.
+  (Shawn Heisey, hossman)
+
+* SOLR-4757: Change the example to use the new solr.xml format and core 
+  discovery by directory structure. (Mark Miller)
+
+* SOLR-4759: Velocity (/browse) template cosmetic cleanup.
+  (Mark Bennett, ehatcher)
+
+* SOLR-4778: LogWatcher init code moved out of CoreContainer (Alan Woodward)
+
+* SOLR-4784: Make class LuceneQParser public (janhoy)
+
 ==================  4.3.0 ==================
 
 Versions of Major Components
@@ -51,7 +129,26 @@ Apache ZooKeeper 3.4.5
 Upgrading from Solr 4.2.0
 ----------------------
 
-(No upgrade instructions yet)
+* In the schema REST API, the output path for copyFields and dynamicFields 
+  has been changed from all lowercase "copyfields" and "dynamicfields" to 
+  camelCase "copyFields" and "dynamicFields", respectively, to align with all 
+  other schema REST API outputs, which use camelCase.  The URL format remains 
+  the same: all resource names are lowercase.  See SOLR-4623 for details.
+  
+* Slf4j/logging jars are no longer included in the Solr webapp. All logging 
+  jars are now in example/lib/ext. Changing logging impls is now as easy as 
+  updating the jars in this folder with those necessary for the logging impl 
+  you would like. If you are using another webapp container, these jars will 
+  need to go in the corresponding location for that container. 
+  In conjunction, the dist-excl-slf4j and dist-war-excl-slf4j build targets
+  have been removed since they are redundant.  See the Slf4j documentation,
+  SOLR-3706, and SOLR-4651 for more details.
+
+* The hardcoded SolrCloud defaults for 'hostContext="solr"' and 
+  'hostPort="8983"' have been deprecated and will be removed in Solr 5.0.  
+  Existing solr.xml files that do not have these options explicitly specified 
+  should be updated accordingly.  See SOLR-4622 for more details.
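+
+  For example, an old-style solr.xml that relies on these defaults today
+  would spell them out explicitly (structure abbreviated; the values shown
+  are the old hardcoded defaults):
+
+    <solr persistent="true">
+      <cores adminPath="/admin/cores" hostContext="solr" hostPort="8983">
+        <core name="collection1" instanceDir="collection1"/>
+      </cores>
+    </solr>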
+
 
 Detailed Change List
 ----------------------
@@ -59,31 +156,152 @@ Detailed Change List
 New Features
 ----------------------
 
-* SOLR-4196 (and others). Solr.xml is being deprecated in favor of a simple
-  properties file. In the absence of a <solr_home>/solr.xml but the presence of
-  <solr_home>/solr.properties, two things will happen
-  1> The attributes that, in the solr.xml file, were in the <solr> and <cores> tags will
-      be read from the solr.properties
-  2> The <solr_home> will be walked and any cores fond will be discovered, which will
-     be inferred by the presence of a "core.properties" file which will contain the 
-     data formerly in the individual <core> tags. The implication here is that there will
-     be no individual core information in the solr.properties file.
-  See the discussion on the wiki page titled "Core Discovery (4.3 and beyond)" for
-  the formats of both solr.properties and the individual core.properties files
+* SOLR-4648: PreAnalyzedUpdateProcessorFactory allows using the functionality
+  of PreAnalyzedField with other field types. See javadoc for details and
+  examples. (Andrzej Bialecki)
+  
+* SOLR-4623: Provide REST API read access to all elements of the live schema.
+  Add a REST API request to return the entire live schema, in JSON, XML, and
+  schema.xml formats.  Move REST API methods from package org.apache.solr.rest
+  to org.apache.solr.rest.schema, and rename base functionality REST API
+  classes to remove the current schema focus, to prepare for other non-schema
+  REST APIs.  Change output path for copyFields and dynamicFields from
+  "copyfields" and "dynamicfields" (all lowercase) to "copyFields" and
+  "dynamicFields", respectively, to align with all other REST API outputs, which
+  use camelCase.
+  (Steve Rowe)
+    
+* SOLR-4658: In preparation for REST API requests that can modify the schema,
+  a "managed schema" is introduced.  
+  Add '<schemaFactory class="ManagedSchemaFactory" mutable="true"/>' to solrconfig.xml
+  in order to use it, and to enable schema modifications via REST API requests.
+  (Steve Rowe, Robert Muir)
+
+* SOLR-4656: Added two new highlight parameters, hl.maxMultiValuedToMatch and 
+  hl.maxMultiValuedToExamine. maxMultiValuedToMatch stops looking for snippets after 
+  finding the specified number of matches, no matter how far into the multivalued field
+  you've gone. maxMultiValuedToExamine stops looking for matches after the specified
+  number of multiValued entries have been examined. If both are specified, the limit
+  hit first stops the loop. Also this patch cuts down on the copying of the document 
+  entries during highlighting. These optimizations are probably unnoticeable unless
+  there are a large number of entries in the multiValued field. Conspicuously, this will
+  prevent the "best" match from being found if it appears later in the MV list than the
+  cutoff specified by either of these params. (Erick Erickson)
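+
+  A minimal SolrJ sketch of the two parameters (field name and limits are
+  illustrative):
+
+    SolrQuery q = new SolrQuery("features:video");
+    q.setHighlight(true);
+    q.set("hl.fl", "features");
+    q.set("hl.maxMultiValuedToExamine", 50); // scan at most 50 entries
+    q.set("hl.maxMultiValuedToMatch", 3);    // or stop after 3 matches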
+
+* SOLR-4675: Improve PostingsSolrHighlighter to support per-field/query-time overrides
+  and add additional configuration parameters. See the javadocs for more details and
+  examples. (Robert Muir)
+
+* SOLR-3755: A new collections api to add additional shards dynamically by splitting
+  existing shards. (yonik, Anshum Gupta, shalin)
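+
+  The new action is invoked through the Collections API, for example
+  (collection and shard names are illustrative):
+
+    http://localhost:8983/solr/admin/collections?action=SPLITSHARD&collection=collection1&shard=shard1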
+
+* SOLR-4530: DIH: Provide configuration to use Tika's IdentityHtmlMapper
+  (Alexandre Rafalovitch via shalin)
+  
+* SOLR-4662: Discover SolrCores by directory structure rather than defining them
+  in solr.xml. Also, change the format of solr.xml to be closer to that of solrconfig.xml.
+  This version of Solr will ship the example in the old style, but you can manually
+  try the new style. Solr 4.4 will ship with the new style, and Solr 5.0 will remove
+  support for the old style. (Erick Erickson, Mark Miller)
+  Additional Work:
+  - SOLR-4347: Ensure that newly-created cores via Admin handler are persisted in solr.xml
+  (Erick Erickson)
+  - SOLR-1905: Cores created by the admin request handler should be persisted to solr.xml.
+  Also fixed a problem whereby properties like solr.solr.datadir would be persisted
+  to solr.xml. Also, cores that didn't happen to be loaded were not persisted. 
   (Erick Erickson)
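+
+  With discovery, each core is marked by a core.properties file in its
+  instance directory; a minimal sketch (keys optional, values illustrative):
+
+    # example/solr/collection1/core.properties
+    name=collection1
+    loadOnStartup=true
+    transient=false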
 
+* SOLR-4717/SOLR-1351: SimpleFacets now works with localParams, allowing faceting on the
+  same field multiple ways (ryan, Uri Boness)
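+
+  A minimal SolrJ sketch (field and keys are illustrative):
+
+    SolrQuery q = new SolrQuery("*:*");
+    q.setFacet(true);
+    q.add("facet.field", "{!key=top5 facet.limit=5}cat"); // first view of cat
+    q.add("facet.field", "{!key=all facet.limit=-1}cat"); // second view of cat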
+
+* SOLR-4671: CSVResponseWriter now supports pseudo fields. (ryan, nihed mbarek)
+
+* SOLR-4358: HttpSolrServer sends the stream name and exposes 'useMultiPartPost'
+  (Karl Wright via ryan)
+   
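+
+  A minimal sketch, assuming the bean setter implied by this entry:
+
+    HttpSolrServer server = new HttpSolrServer("http://localhost:8983/solr");
+    server.setUseMultiPartPost(true); // updates go out as multipart POSTs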
+
 Bug Fixes
 ----------------------
 
 * SOLR-4543: setting shardHandlerFactory in solr.xml/solr.properties does not work.
   (Ryan Ernst, Robert Muir via Erick Erickson)
 
+* SOLR-4634: Fix scripting engine tests to work with Java 8's "Nashorn" Javascript
+  implementation.  (Uwe Schindler)
+
+* SOLR-4636: If opening a reader fails for some reason when opening a SolrIndexSearcher,
+  a Directory can be left unreleased. (Mark Miller)
+
+* SOLR-4405: Admin UI - admin-extra files are not rendered into the core-menu (steffkes)
+
+* SOLR-3956: Fixed group.facet=true to work with negative facet.limit
+  (Chris van der Merwe, hossman)
+  
+* SOLR-4650: copyField doesn't work with source globs that don't match any
+  explicit or dynamic fields.  This regression was introduced in Solr 4.2.
+  (Daniel Collins, Steve Rowe)
+  
+* SOLR-4641: Schema now throws exception on illegal field parameters.  (Robert Muir)
+
+* SOLR-3758: Fixed SpellCheckComponent to work consistently with distributed grouping
+  (James Dyer)
+
+* SOLR-4652: Fix broken behavior with shared libraries in resource loader for
+  solr.xml plugins.  (Ryan Ernst, Robert Muir, Uwe Schindler)
+
+* SOLR-4664: ZkStateReader should update aliases on construction. 
+  (Mark Miller, Elodie Sannier)
+
+* SOLR-4682: CoreAdminRequest.mergeIndexes cannot merge multiple cores or indexDirs.
+  (Jason.D.Cao via shalin)
+
+* SOLR-4581: When faceting on numeric fields in Solr 4.2, negative values (constraints)
+  were sorted incorrectly. (Alexander Buhr, shalin, yonik)
+
+* SOLR-4699: The System admin handler should not assume a file system based data directory 
+  location. (Mark Miller)
+
+* SOLR-4695: Fix core admin SPLIT action to be useful with non-cloud setups (shalin)
+
+* SOLR-4680: Correct example spellcheck configuration's queryAnalyzerFieldType and
+  use "text" field instead of narrower "name" field (ehatcher, Mark Bennett)
+
+* SOLR-4702: Fix example /browse "Did you mean?" suggestion feature. (ehatcher, Mark Bennett)
+
+* SOLR-4710: You cannot delete a collection fully from ZooKeeper unless all nodes are up and 
+  functioning correctly. (Mark Miller)
+
+* SOLR-4487: SolrExceptions thrown by HttpSolrServer will now contain the 
+  proper HTTP status code returned by the remote server, even if that status 
+  code is not something Solr itself returned -- eg: from the Servlet Container, 
+  or an intermediate HTTP Proxy (hossman)
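+
+  A minimal sketch of reading the propagated status (fragment; handling of
+  the checked SolrServerException is omitted):
+
+    try {
+      server.query(new SolrQuery("*:*"));
+    } catch (SolrException e) {
+      int status = e.code(); // e.g. 502 reported by an intermediate proxy
+    }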
+
+* SOLR-4661: Admin UI Replication details now correctly displays the current
+  replicable generation/version of the master. (hossman)
+
+* SOLR-4716, SOLR-4584: SolrCloud request proxying does not work on Tomcat and
+  perhaps other non-Jetty containers. (Po Rui, Yago Riveiro via Mark Miller)
+
+* SOLR-4746: Distributed grouping used a NamedList instead of a SimpleOrderedMap
+  for the top level group commands, causing output formatting differences
+  compared to non-distributed grouping. (yonik)
+
+* SOLR-4705: Fixed bug causing NPE when querying a single replica in SolrCloud 
+  using the shards param (Raintung Li, hossman)
+
+* SOLR-4729: LukeRequestHandler: Using a dynamic copyField source that is
+  not also a dynamic field triggers error message 'undefined field: "(glob)"'.
+  (Adam Hahn, hossman, Steve Rowe)
+
 Optimizations
 ----------------------
 
 Other Changes
 ----------------------
 
+* SOLR-4653: Solr configuration should log inaccessible/non-existent relative paths in lib
+  dir=... (Dawid Weiss)
+
 * SOLR-4317: SolrTestCaseJ4: Can't avoid "collection1" convention (Tricia Jenkins, via Erick Erickson)
 
 * SOLR-4571: SolrZkClient#setData should return Stat object. (Mark Miller)
@@ -95,6 +313,24 @@ Other Changes
   (Ryan Ernst via Robert Muir)
 
 * SOLR-4607: Use noggit 0.5 release jar rather than a forked copy.  (Yonik Seeley, Robert Muir)
+  
+* SOLR-3706: Ship setup to log with log4j. (ryan, Mark Miller)
+
+* SOLR-4651: Remove dist-excl-slf4j build target. (Shawn Heisey)
+
+* SOLR-4622: The hardcoded SolrCloud defaults for 'hostContext="solr"' and 
+  'hostPort="8983"' have been deprecated and will be removed in Solr 5.0.  
+  Existing solr.xml files that do not have these options explicitly specified 
+  should be updated accordingly.  (hossman)
+
+* SOLR-4672: Requests attempting to use SolrCores which had init failures 
+  (that would be reported by CoreAdmin STATUS requests) now result in 500 
+  error responses with the details about the init failure, instead of 404 
+  error responses.  (hossman)
+
+* SOLR-4730: Make the wiki link more prominent in the release documentation.
+  (Uri Laserson via Robert Muir)
+  
 
 ==================  4.2.1 ==================
 
@@ -206,6 +442,33 @@ Bug Fixes
 * SOLR-4589: Fixed CPU spikes and poor performance in lazy field loading 
   of multivalued fields. (hossman)
 
+* SOLR-4608: Update Log replay and PeerSync replay should use the default
+  processor chain to update the index. (Ludovic Boutros, yonik)
+
+* SOLR-4625: The solr (lucene syntax) query parser lost top-level boost
+  values and top-level phrase slops on queries produced by nested
+  sub-parsers. (yonik)
+
+* SOLR-4624: CachingDirectoryFactory no longer needs to support forceNew,
+  and it appears to cause a bug where a Directory is never closed. forceNew
+  is no longer respected and will be removed in 4.3. (Mark Miller)
+
+* SOLR-3819: Grouped faceting (group.facet=true) did not respect filter
+  exclusions. (Petter Remen, yonik)
+
+* SOLR-4637: Replication can sometimes wait until shutdown or core unload until
+  removing some tmp directories. (Mark Miller)
+  
+* SOLR-4638: DefaultSolrCoreState#getIndexWriter(null) is a way to avoid
+  creating the IndexWriter earlier than necessary, but it's not 
+  implemented quite right. (Mark Miller)
+
+* SOLR-4640: CachingDirectoryFactory can fail to close directories in some race
+  conditions. (Mark Miller)
+
+* SOLR-4642: QueryResultKey is not calculating the correct hashCode for filters.
+  (Joel Bernstein via Mark Miller)
+
 Optimizations
 ----------------------
 

Modified: lucene/dev/branches/lucene4258/solr/SYSTEM_REQUIREMENTS.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/solr/SYSTEM_REQUIREMENTS.txt?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/solr/SYSTEM_REQUIREMENTS.txt (original)
+++ lucene/dev/branches/lucene4258/solr/SYSTEM_REQUIREMENTS.txt Tue May  7 11:20:55 2013
@@ -5,12 +5,9 @@ install at least Update 1! With all Java
 recommended to not use experimental `-XX` JVM options. It is also 
 recommended to always use the latest update version of your Java VM, 
 because bugs may affect Solr. An overview of known JVM bugs can be 
-found on http://wiki.apache.org/lucene-java/SunJavaBugs. 
+found on http://wiki.apache.org/lucene-java/JavaBugs. 
 
 CPU, disk and memory requirements are based on the many choices made in 
 implementing Solr (document size, number of documents, and number of 
 hits retrieved to name a few). The benchmarks page has some information 
 related to performance on particular platforms. 
-
-*To build Apache Solr from source, refer to the `BUILD.txt` file in 
-the distribution directory.* 

Modified: lucene/dev/branches/lucene4258/solr/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/solr/build.xml?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/solr/build.xml (original)
+++ lucene/dev/branches/lucene4258/solr/build.xml Tue May  7 11:20:55 2013
@@ -59,7 +59,6 @@
   <target name="run-example" depends="example"
           description="Run Solr interactively, via Jetty.  -Dexample.debug=true to enable JVM debugger">
     <property name="example.solr.home" location="example/solr"/>
-    <property name="example.data.dir" location="example/solr/data"/>
     <property name="example.debug.suspend" value="n"/>
     <property name="example.jetty.port" value="8983"/>
     <condition property="example.jvm.line" value="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=${example.debug.suspend},address=5005">
@@ -70,7 +69,6 @@
     <java jar="${example}/start.jar" fork="true" dir="${example}" maxmemory="${example.heap.size}">
       <jvmarg line="${example.jvm.line}"/>
       <sysproperty key="solr.solr.home" file="${example.solr.home}"/>
-      <sysproperty key="solr.data.dir" value="${example.data.dir}"/>
       <sysproperty key="jetty.port" value="${example.jetty.port}"/>
     </java>
   </target>
@@ -338,18 +336,7 @@
   
   <target name="dist"
           description="Creates the Solr distribution files."
-          depends="-dist-common, dist-war" />
- 
-  <target name="dist-excl-slf4j"
-          description="Creates the Solr distribution files without slf4j API or bindings."
-          depends="-dist-common, dist-war-excl-slf4j" />
- 
-  <target name="-dist-common"
-          depends="dist-solrj, dist-core, dist-test-framework, dist-contrib">
-    <!-- Targets common to dist and dist-excl-slf4j.
-         Only usable as a dependency
-    -->
-  </target>
+          depends="dist-solrj, dist-core, dist-test-framework, dist-contrib, dist-war" />
  
   <target name="dist-test-framework" depends="init-dist"
           description="Creates the Solr test-framework JAR.">
@@ -366,20 +353,10 @@
   <target name="dist-war"
           description="Creates the Solr WAR Distribution file.">
     <ant dir="webapp" target="dist" inheritall="false">
-      <property name="exclude.from.war" value="log4j-1.*" />
       <propertyset refid="uptodate.and.compiled.properties"/>
     </ant>
   </target>
   
-  <target name="dist-war-excl-slf4j"
-          description="Creates a Solr WAR Distribution file, excluding slf4j API and bindings.">
-    <ant dir="webapp" target="dist" inheritall="false">
-      <propertyset refid="uptodate.and.compiled.properties"/>
-      <property name="exclude.from.war" value="*slf4j*,log4j-*" />
-      <property name="solr.war.suffix" value="-excl-slf4j" />
-    </ant>
-  </target>
-
   <target name="prepare-release-no-sign" depends="clean, package, generate-maven-artifacts"/>
   <target name="prepare-release" depends="prepare-release-no-sign, sign-artifacts"/>
  

Modified: lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-extzk-start.sh
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-extzk-start.sh?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-extzk-start.sh (original)
+++ lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-extzk-start.sh Tue May  7 11:20:55 2013
@@ -24,7 +24,7 @@ cp -r -f example example4
 cp -r -f example example5
 cp -r -f example example6
 
-java -classpath lib/*:dist/*:build/lucene-libs/* org.apache.solr.cloud.ZkController "$zkaddress" 8983 example/solr/conf conf1
+java -classpath "example/solr-webapp/webapp/WEB-INF/lib/*:example/lib/ext/" org.apache.solr.cloud.ZkController "$zkaddress" 8983 example/solr/conf conf1
 
 cd example
 java -DzkHost="$zkaddress" -DnumShards=2 -DSTOP.PORT=7983 -DSTOP.KEY=key -jar start.jar 1>example.log 2>&1 &

Modified: lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-multi-start.sh
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-multi-start.sh?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-multi-start.sh (original)
+++ lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-multi-start.sh Tue May  7 11:20:55 2013
@@ -24,7 +24,7 @@ cp -r -f example example4
 cp -r -f example example5
 cp -r -f example example6
 
-java -classpath lib/*:dist/*:build/lucene-libs/* org.apache.solr.cloud.ZkCLI -cmd upconf -zkhost 127.0.0.1:9983 -solrhome example/multicore -runzk 8983
+java -classpath "example/solr-webapp/webapp/WEB-INF/lib/*:example/lib/ext/" org.apache.solr.cloud.ZkCLI -cmd upconf -zkhost 127.0.0.1:9983 -solrhome example/multicore -runzk 8983
 
 cd example
 java -DzkRun -DnumShards=2 -DSTOP.PORT=7983 -DSTOP.KEY=key -Dsolr.solr.home=multicore -jar start.jar 1>example.log 2>&1 &

Modified: lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-start.sh
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-start.sh?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-start.sh (original)
+++ lucene/dev/branches/lucene4258/solr/cloud-dev/solrcloud-start.sh Tue May  7 11:20:55 2013
@@ -25,7 +25,7 @@ cp -r -f example example4
 cp -r -f example example5
 cp -r -f example example6
 
-java -classpath "example/solr-webapp/webapp/WEB-INF/lib/*" org.apache.solr.cloud.ZkCLI -cmd bootstrap -zkhost 127.0.0.1:9983 -solrhome example/solr -runzk 8983
+java -classpath "example/solr-webapp/webapp/WEB-INF/lib/*:example/lib/ext/" org.apache.solr.cloud.ZkCLI -cmd bootstrap -zkhost 127.0.0.1:9983 -solrhome example/solr -runzk 8983
 
 cd example
 java -DzkRun -DnumShards=2 -DSTOP.PORT=7983 -DSTOP.KEY=key -jar start.jar 1>example.log 2>&1 &

Modified: lucene/dev/branches/lucene4258/solr/common-build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/solr/common-build.xml?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/solr/common-build.xml (original)
+++ lucene/dev/branches/lucene4258/solr/common-build.xml Tue May  7 11:20:55 2013
@@ -24,10 +24,6 @@
   <dirname file="${ant.file.common-solr}" property="common-solr.dir"/>
   
   <property name="Name" value="Solr" />
-  <property name="version" value="5.0-SNAPSHOT"/>
-  <property name="fullname" value="${ant.project.name}"/>
-  <property name="fullnamever" value="${fullname}-${version}"/>
-  <property name="final.name" value="${fullnamever}"/>
   
   <!-- solr uses 1.7 -->
   <property name="javac.source" value="1.7"/>
@@ -43,7 +39,6 @@
   <property name="tests.userdir" location="src/test-files"/>
   <property name="example" location="${common-solr.dir}/example" />
   <property name="javadoc.dir" location="${dest}/docs"/>
-  <property name="tests.loggingfile" location="${common-solr.dir}/testlogging.properties"/>
   <property name="tests.cleanthreads.sysprop" value="perClass"/>
 
   <property name="changes.target.dir" location="${dest}/docs/changes"/>
@@ -51,6 +46,9 @@
 
   <import file="${common-solr.dir}/../lucene/module-build.xml"/>
 
+  <!-- backwards compatibility with existing targets/tasks; TODO: remove this! -->
+  <property name="fullnamever" value="${final.name}"/>
+
   <path id="additional.dependencies">
   	<fileset dir="${common-solr.dir}/core/lib" excludes="${common.classpath.excludes}"/>
   	<fileset dir="${common-solr.dir}/solrj/lib" excludes="${common.classpath.excludes}"/>
@@ -59,16 +57,6 @@
   	<fileset dir="lib" excludes="${common.classpath.excludes}" erroronmissingdir="false"/>
   </path>
 
-  <!-- Solr Specification Version
-
-       This will be used in the Manifest file, and therefore must
-       match the pattern "digit+{.digit+}*"
-
-       By default, this should be set to "X.Y.M.${dateversion}"
-       where X.Y.M is the last version released (on this branch).
-    -->
-  <property name="solr.spec.version" value="5.0.0.${dateversion}" />
-  
   <!-- defined here to be able to make the forbidden-api checker correctly
    reference it. 'ivy.xml' is also referencing this property. -->
   <property name="commons-io.version" value="2.1" />
@@ -170,7 +158,7 @@
     <attribute name="excludes" default="**/pom.xml,**/*.iml"/>
     <attribute name="metainf.source.dir" default="${common-solr.dir}"/>
     <attribute name="implementation.title" default="org.apache.solr"/>
-    <attribute name="spec.version" default="${solr.spec.version}"/>
+    <attribute name="spec.version" default="${spec.version}"/>
     <attribute name="manifest.file" default="${manifest.file}"/>
     <element name="nested" optional="true" implicit="true"/>
     <sequential>

Modified: lucene/dev/branches/lucene4258/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4258/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java?rev=1479862&r1=1479861&r2=1479862&view=diff
==============================================================================
--- lucene/dev/branches/lucene4258/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java (original)
+++ lucene/dev/branches/lucene4258/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java Tue May  7 11:20:55 2013
@@ -17,7 +17,10 @@ package org.apache.solr.handler.clusteri
  * limitations under the License.
  */
 
-import java.io.*;
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -278,8 +281,8 @@ public class CarrotClusteringEngine exte
       attributeBuilder.stemmerFactory(LuceneCarrot2StemmerFactory.class);
     }
 
-    // Pass the schema to SolrStopwordsCarrot2LexicalDataFactory.
-    initAttributes.put("solrIndexSchema", core.getSchema());
+    // Pass the schema (via the core) to SolrStopwordsCarrot2LexicalDataFactory.
+    initAttributes.put("solrCore", core);
 
     // Customize Carrot2's resource lookup to first look for resources
     // using Solr's resource loader. If that fails, try loading from the classpath.
@@ -303,7 +306,7 @@ public class CarrotClusteringEngine exte
       ct.setContextClassLoader(prev);
     }
 
-    SchemaField uniqueField = core.getSchema().getUniqueKeyField();
+    SchemaField uniqueField = core.getLatestSchema().getUniqueKeyField();
     if (uniqueField == null) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
           CarrotClusteringEngine.class.getSimpleName() + " requires the schema to have a uniqueKeyField");