Posted to commits@lucene.apache.org by si...@apache.org on 2011/02/09 10:36:03 UTC

svn commit: r1068809 [15/36] - in /lucene/dev/branches/docvalues: ./ dev-tools/eclipse/ dev-tools/idea/.idea/ dev-tools/idea/.idea/copyright/ dev-tools/idea/lucene/ dev-tools/idea/lucene/contrib/ant/ dev-tools/idea/lucene/contrib/queryparser/ dev-tools...

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestAssertions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestAssertions.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestAssertions.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestAssertions.java Wed Feb  9 09:35:27 2011
@@ -35,34 +35,45 @@ public class TestAssertions extends Luce
   }
   
   static class TestAnalyzer1 extends Analyzer {
+    @Override
     public final TokenStream tokenStream(String s, Reader r) { return null; }
+    @Override
     public final TokenStream reusableTokenStream(String s, Reader r) { return null; }
   }
 
   static final class TestAnalyzer2 extends Analyzer {
+    @Override
     public TokenStream tokenStream(String s, Reader r) { return null; }
+    @Override
     public TokenStream reusableTokenStream(String s, Reader r) { return null; }
   }
 
   static class TestAnalyzer3 extends Analyzer {
+    @Override
     public TokenStream tokenStream(String s, Reader r) { return null; }
+    @Override
     public TokenStream reusableTokenStream(String s, Reader r) { return null; }
   }
 
   static class TestAnalyzer4 extends Analyzer {
+    @Override
     public final TokenStream tokenStream(String s, Reader r) { return null; }
+    @Override
     public TokenStream reusableTokenStream(String s, Reader r) { return null; }
   }
 
   static class TestTokenStream1 extends TokenStream {
+    @Override
     public final boolean incrementToken() { return false; }
   }
 
   static final class TestTokenStream2 extends TokenStream {
+    @Override
     public boolean incrementToken() { return false; }
   }
 
   static class TestTokenStream3 extends TokenStream {
+    @Override
     public boolean incrementToken() { return false; }
   }
 

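Note on the TestAssertions hunk above: the added @Override annotations make the compiler verify that each method actually overrides a superclass method, instead of silently declaring an unrelated one. A minimal illustrative sketch (not from the commit; class names are made up):

    abstract class BaseStream {
      public abstract boolean incrementToken();
    }

    class MyStream extends BaseStream {
      @Override
      public boolean incrementToken() { return false; }   // verified override
      // @Override
      // public boolean incrementtoken() { return false; } // typo: would now
      //                                                    // fail to compile
    }
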
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestExternalCodecs.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestExternalCodecs.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestExternalCodecs.java Wed Feb  9 09:35:27 2011
@@ -18,6 +18,7 @@ package org.apache.lucene;
  */
 
 import org.apache.lucene.util.*;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.index.*;
 import org.apache.lucene.document.*;
 import org.apache.lucene.search.*;
@@ -64,6 +65,7 @@ public class TestExternalCodecs extends 
         return t2.length-t1.length;
       }
 
+      @Override
       public boolean equals(Object other) {
         return this == other;
       }
@@ -103,6 +105,8 @@ public class TestExternalCodecs extends 
     static class RAMField extends Terms {
       final String field;
       final SortedMap<String,RAMTerm> termToDocs = new TreeMap<String,RAMTerm>();
+      long sumTotalTermFreq;
+
       RAMField(String field) {
         this.field = field;
       }
@@ -113,6 +117,11 @@ public class TestExternalCodecs extends 
       }
 
       @Override
+      public long getSumTotalTermFreq() {
+        return sumTotalTermFreq;
+      }
+
+      @Override
       public TermsEnum iterator() {
         return new RAMTermsEnum(RAMOnlyCodec.RAMField.this);
       }
@@ -125,6 +134,7 @@ public class TestExternalCodecs extends 
 
     static class RAMTerm {
       final String term;
+      long totalTermFreq;
       final List<RAMDoc> docs = new ArrayList<RAMDoc>();
       public RAMTerm(String term) {
         this.term = term;
@@ -197,14 +207,16 @@ public class TestExternalCodecs extends 
       }
 
       @Override
-      public void finishTerm(BytesRef text, int numDocs) {
-        assert numDocs > 0;
-        assert numDocs == current.docs.size();
+      public void finishTerm(BytesRef text, TermStats stats) {
+        assert stats.docFreq > 0;
+        assert stats.docFreq == current.docs.size();
+        current.totalTermFreq = stats.totalTermFreq;
         field.termToDocs.put(current.term, current);
       }
 
       @Override
-      public void finish() {
+      public void finish(long sumTotalTermFreq) {
+        field.sumTotalTermFreq = sumTotalTermFreq;
       }
     }
 
@@ -243,7 +255,6 @@ public class TestExternalCodecs extends 
       }
     }
 
-
     // Classes for reading from the postings state
     static class RAMFieldsEnum extends FieldsEnum {
       private final RAMPostings postings;
@@ -344,7 +355,8 @@ public class TestExternalCodecs extends 
       }
 
       @Override
-      public void cacheCurrentTerm() {
+      public long totalTermFreq() {
+        return ramField.termToDocs.get(current).totalTermFreq;
       }
 
       @Override
@@ -546,7 +558,7 @@ public class TestExternalCodecs extends 
       // Terms dict
       success = false;
       try {
-        FieldsConsumer ret = new PrefixCodedTermsWriter(indexWriter, state, pulsingWriter, reverseUnicodeComparator);
+        FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, pulsingWriter, reverseUnicodeComparator);
         success = true;
         return ret;
       } finally {
@@ -587,15 +599,15 @@ public class TestExternalCodecs extends 
       // Terms dict reader
       success = false;
       try {
-        FieldsProducer ret = new PrefixCodedTermsReader(indexReader,
-                                                         state.dir,
-                                                         state.fieldInfos,
-                                                         state.segmentInfo.name,
-                                                         pulsingReader,
-                                                         state.readBufferSize,
-                                                         reverseUnicodeComparator,
-                                                         StandardCodec.TERMS_CACHE_SIZE,
-                                                         state.codecId);
+        FieldsProducer ret = new BlockTermsReader(indexReader,
+                                                  state.dir,
+                                                  state.fieldInfos,
+                                                  state.segmentInfo.name,
+                                                  pulsingReader,
+                                                  state.readBufferSize,
+                                                  reverseUnicodeComparator,
+                                                  StandardCodec.TERMS_CACHE_SIZE,
+                                                  state.codecId);
         success = true;
         return ret;
       } finally {
@@ -612,7 +624,7 @@ public class TestExternalCodecs extends 
     @Override
     public void files(Directory dir, SegmentInfo segmentInfo, String codecId, Set<String> files) throws IOException {
       StandardPostingsReader.files(dir, segmentInfo, codecId, files);
-      PrefixCodedTermsReader.files(dir, segmentInfo, codecId, files);
+      BlockTermsReader.files(dir, segmentInfo, codecId, files);
       FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files);
     }
 
@@ -633,13 +645,15 @@ public class TestExternalCodecs extends 
     
     
     final int NUM_DOCS = 173;
-    Directory dir = newDirectory();
+    MockDirectoryWrapper dir = newDirectory();
+    dir.setCheckIndexOnClose(false); // we use a custom codec provider
     IndexWriter w = new IndexWriter(
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, true)).
             setCodecProvider(provider).
             setMergePolicy(newLogMergePolicy(3))
     );
+    w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     // uses default codec:
     doc.add(newField("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED));
@@ -661,7 +675,7 @@ public class TestExternalCodecs extends 
     }
     w.deleteDocuments(new Term("id", "77"));
 
-    IndexReader r = IndexReader.open(w);
+    IndexReader r = IndexReader.open(w, true);
     IndexReader[] subs = r.getSequentialSubReaders();
     // test each segment
     for(int i=0;i<subs.length;i++) {
@@ -671,7 +685,7 @@ public class TestExternalCodecs extends 
     testTermsOrder(r);
     
     assertEquals(NUM_DOCS-1, r.numDocs());
-    IndexSearcher s = new IndexSearcher(r);
+    IndexSearcher s = newSearcher(r);
     assertEquals(NUM_DOCS-1, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits);
     assertEquals(NUM_DOCS-1, s.search(new TermQuery(new Term("field2", "pulsing")), 1).totalHits);
     r.close();
@@ -679,10 +693,10 @@ public class TestExternalCodecs extends 
 
     w.deleteDocuments(new Term("id", "44"));
     w.optimize();
-    r = IndexReader.open(w);
+    r = IndexReader.open(w, true);
     assertEquals(NUM_DOCS-2, r.maxDoc());
     assertEquals(NUM_DOCS-2, r.numDocs());
-    s = new IndexSearcher(r);
+    s = newSearcher(r);
     assertEquals(NUM_DOCS-2, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits);
     assertEquals(NUM_DOCS-2, s.search(new TermQuery(new Term("field2", "pulsing")), 1).totalHits);
     assertEquals(1, s.search(new TermQuery(new Term("id", "76")), 1).totalHits);

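Note on the TestExternalCodecs hunks above: the codec write API changed so that finishTerm takes a TermStats (docFreq plus totalTermFreq) rather than a bare doc count, and finish(...) now receives the per-field sum of total term frequencies. A rough write-side sketch, assuming the trunk API at this revision (TermData and its fields stand in for the test's helpers):

    long sumTotalTermFreq = 0;
    for (TermData term : terms) {
      PostingsConsumer postings = termsConsumer.startTerm(term.text);
      long totalTermFreq = 0;
      for (int i = 0; i < term.docs.length; i++) {
        postings.startDoc(term.docs[i], term.positions[i].length);
        totalTermFreq += term.positions[i].length;  // one occurrence per position
        // ... postings.addPosition(pos, payload) per position, then finishDoc()
      }
      termsConsumer.finishTerm(term.text, new TermStats(term.docs.length, totalTermFreq));
      sumTotalTermFreq += totalTermFreq;
    }
    termsConsumer.finish(sumTotalTermFreq);  // new per-field summary
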
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java Wed Feb  9 09:35:27 2011
@@ -91,8 +91,8 @@ public class TestMergeSchedulerExternal 
     
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer()).setMergeScheduler(new MyMergeScheduler())
-        .setMaxBufferedDocs(2).setRAMBufferSizeMB(
-            IndexWriterConfig.DISABLE_AUTO_FLUSH));
+        .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+        .setMergePolicy(newLogMergePolicy()));
     LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
     logMP.setMergeFactor(10);
     for(int i=0;i<20;i++)

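Note on the TestMergeSchedulerExternal hunk above: newIndexWriterConfig randomizes its defaults in the test framework, so the unconditional cast to LogMergePolicy could otherwise fail; the fix pins a known policy first. The pattern in isolation:

    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
        .setMergePolicy(newLogMergePolicy());  // pin a known merge policy
    LogMergePolicy logMP = (LogMergePolicy) conf.getMergePolicy();  // now safe
    logMP.setMergeFactor(10);
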
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestSearch.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestSearch.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestSearch.java Wed Feb  9 09:35:27 2011
@@ -74,8 +74,11 @@ public class TestSearch extends LuceneTe
       Directory directory = newDirectory();
       Analyzer analyzer = new MockAnalyzer();
       IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
-      LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFile);
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+      }
+      
       IndexWriter writer = new IndexWriter(directory, conf);
 
       String[] docs = {
@@ -90,6 +93,7 @@ public class TestSearch extends LuceneTe
       for (int j = 0; j < docs.length; j++) {
         Document d = new Document();
         d.add(newField("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
+        d.add(newField("id", ""+j, Field.Index.NOT_ANALYZED_NO_NORMS));
         writer.addDocument(d);
       }
       writer.close();
@@ -106,13 +110,20 @@ public class TestSearch extends LuceneTe
       };
       ScoreDoc[] hits = null;
 
+      Sort sort = new Sort(new SortField[] {
+          SortField.FIELD_SCORE,
+          new SortField("id", SortField.INT)});
+
       QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "contents", analyzer);
       parser.setPhraseSlop(4);
       for (int j = 0; j < queries.length; j++) {
         Query query = parser.parse(queries[j]);
         out.println("Query: " + query.toString("contents"));
+        if (VERBOSE) {
+          System.out.println("TEST: query=" + query);
+        }
 
-        hits = searcher.search(query, null, 1000).scoreDocs;
+        hits = searcher.search(query, null, 1000, sort).scoreDocs;
 
         out.println(hits.length + " total results");
         for (int i = 0 ; i < hits.length && i < 10; i++) {

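Note on the TestSearch hunks above: many of these docs score identically, and the rank order of equal-score hits is not guaranteed under randomized segment layouts, so the test now sorts by score with the new int "id" field as tie-break. The composite sort, as used above:

    Sort sort = new Sort(new SortField[] {
        SortField.FIELD_SCORE,                 // primary: relevance
        new SortField("id", SortField.INT)});  // tie-break: per-doc id
    ScoreDoc[] hits = searcher.search(query, null, 1000, sort).scoreDocs;
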
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java Wed Feb  9 09:35:27 2011
@@ -80,8 +80,10 @@ public class TestSearchForDuplicates ext
       Directory directory = newDirectory();
       Analyzer analyzer = new MockAnalyzer();
       IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
-      LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFiles);
+      final MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFiles);
+      }
       IndexWriter writer = new IndexWriter(directory, conf);
       if (VERBOSE) {
         System.out.println("TEST: now build index");
@@ -93,9 +95,6 @@ public class TestSearchForDuplicates ext
       for (int j = 0; j < MAX_DOCS; j++) {
         Document d = new Document();
         d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
-
-        // NOTE: this ID_FIELD produces no tokens since
-        // MockAnalyzer discards numbers
         d.add(newField(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
         writer.addDocument(d);
       }
@@ -108,8 +107,15 @@ public class TestSearchForDuplicates ext
 
       Query query = parser.parse(HIGH_PRIORITY);
       out.println("Query: " + query.toString(PRIORITY_FIELD));
+      if (VERBOSE) {
+        System.out.println("TEST: search query=" + query);
+      }
+
+      final Sort sort = new Sort(new SortField[] {
+          SortField.FIELD_SCORE,
+          new SortField(ID_FIELD, SortField.INT)});
 
-      ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS).scoreDocs;
+      ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
       printHits(out, hits, searcher);
       checkHits(hits, MAX_DOCS, searcher);
 
@@ -124,7 +130,7 @@ public class TestSearchForDuplicates ext
       query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY);
       out.println("Query: " + query.toString(PRIORITY_FIELD));
 
-      hits = searcher.search(query, null, MAX_DOCS).scoreDocs;
+      hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
       printHits(out, hits, searcher);
       checkHits(hits, MAX_DOCS, searcher);
 
@@ -146,7 +152,7 @@ public class TestSearchForDuplicates ext
   private void checkHits(ScoreDoc[] hits, int expectedCount, IndexSearcher searcher) throws IOException {
     assertEquals("total results", expectedCount, hits.length);
     for (int i = 0 ; i < hits.length; i++) {
-      if ( i < 10 || (i > 94 && i < 105) ) {
+      if (i < 10 || (i > 94 && i < 105)) {
         Document d = searcher.doc(hits[i].doc);
         assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
       }

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java Wed Feb  9 09:35:27 2011
@@ -38,13 +38,13 @@ public class TestNumericTokenStream exte
     final BytesRef bytes = new BytesRef();
     stream.reset();
     assertEquals(64, numericAtt.getValueSize());
-    assertEquals(lvalue, numericAtt.getRawValue());
     for (int shift=0; shift<64; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
       assertTrue("New token is available", stream.incrementToken());
       assertEquals("Shift value wrong", shift, numericAtt.getShift());
       final int hash = bytesAtt.toBytesRef(bytes);
       assertEquals("Hash incorrect", bytes.hashCode(), hash);
       assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), NumericUtils.prefixCodedToLong(bytes));
+      assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
       assertEquals("Type incorrect", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
     }
     assertFalse("More tokens available", stream.incrementToken());
@@ -61,13 +61,13 @@ public class TestNumericTokenStream exte
     final BytesRef bytes = new BytesRef();
     stream.reset();
     assertEquals(32, numericAtt.getValueSize());
-    assertEquals(ivalue, numericAtt.getRawValue());
     for (int shift=0; shift<32; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
       assertTrue("New token is available", stream.incrementToken());
       assertEquals("Shift value wrong", shift, numericAtt.getShift());
       final int hash = bytesAtt.toBytesRef(bytes);
       assertEquals("Hash incorrect", bytes.hashCode(), hash);
       assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), NumericUtils.prefixCodedToInt(bytes));
+      assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
       assertEquals("Type incorrect", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
     }
     assertFalse("More tokens available", stream.incrementToken());

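Note on the TestNumericTokenStream hunks above: getRawValue() is now checked per token, because at shift s the token encodes the original value with its low s bits cleared. A self-contained sketch of that masking (the precision step of 4 is illustrative only):

    public class PrefixMaskDemo {
      public static void main(String[] args) {
        long lvalue = 4573245871874382L;
        for (int shift = 0; shift < 64; shift += 4) {
          long raw = lvalue & ~((1L << shift) - 1L);  // clear low `shift` bits
          System.out.println("shift=" + shift + " raw=" + raw);
        }
      }
    }
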
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/TestToken.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/TestToken.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/TestToken.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/TestToken.java Wed Feb  9 09:35:27 2011
@@ -22,8 +22,11 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Attribute;
 import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util._TestUtil;
 
 import java.io.StringReader;
+import java.util.HashMap;
 
 public class TestToken extends LuceneTestCase {
 
@@ -241,6 +244,22 @@ public class TestToken extends LuceneTes
       ts.addAttribute(TypeAttribute.class) instanceof Token);
   }
 
+  public void testAttributeReflection() throws Exception {
+    Token t = new Token("foobar", 6, 22, 8);
+    _TestUtil.assertAttributeReflection(t,
+      new HashMap<String,Object>() {{
+        put(CharTermAttribute.class.getName() + "#term", "foobar");
+        put(TermToBytesRefAttribute.class.getName() + "#bytes", new BytesRef("foobar"));
+        put(OffsetAttribute.class.getName() + "#startOffset", 6);
+        put(OffsetAttribute.class.getName() + "#endOffset", 22);
+        put(PositionIncrementAttribute.class.getName() + "#positionIncrement", 1);
+        put(PayloadAttribute.class.getName() + "#payload", null);
+        put(TypeAttribute.class.getName() + "#type", TypeAttribute.DEFAULT_TYPE);
+        put(FlagsAttribute.class.getName() + "#flags", 8);
+      }});
+  }
+
+
   public static <T extends AttributeImpl> T assertCloneIsEqual(T att) {
     @SuppressWarnings("unchecked")
     T clone = (T) att.clone();

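Note on the TestToken hunk above: the expected-state map is built with the anonymous-subclass ("double brace") initializer, and its keys follow the "<attribute interface FQN>#<property>" convention that _TestUtil.assertAttributeReflection compares against. Reduced sketch of the idiom (java.util and the attribute imports assumed):

    Map<String,Object> expected = new HashMap<String,Object>() {{
      put(OffsetAttribute.class.getName() + "#startOffset", 6);
      put(OffsetAttribute.class.getName() + "#endOffset", 22);
    }};
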
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java Wed Feb  9 09:35:27 2011
@@ -19,7 +19,10 @@ package org.apache.lucene.analysis.token
 
 import org.apache.lucene.analysis.TestToken;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util._TestUtil;
 import java.nio.CharBuffer;
+import java.util.HashMap;
 import java.util.Formatter;
 import java.util.Locale;
 import java.util.regex.Pattern;
@@ -126,6 +129,15 @@ public class TestCharTermAttributeImpl e
     assertNotSame(buf, copy.buffer());
   }
   
+  public void testAttributeReflection() throws Exception {
+    CharTermAttributeImpl t = new CharTermAttributeImpl();
+    t.append("foobar");
+    _TestUtil.assertAttributeReflection(t, new HashMap<String,Object>() {{
+      put(CharTermAttribute.class.getName() + "#term", "foobar");
+      put(TermToBytesRefAttribute.class.getName() + "#bytes", new BytesRef("foobar"));
+    }});
+  }
+  
   public void testCharSequenceInterface() {
     final String s = "0123456789"; 
     final CharTermAttributeImpl t = new CharTermAttributeImpl();
@@ -215,6 +227,7 @@ public class TestCharTermAttributeImpl e
       public char charAt(int i) { return longTestString.charAt(i); }
       public int length() { return longTestString.length(); }
       public CharSequence subSequence(int start, int end) { return longTestString.subSequence(start, end); }
+      @Override
       public String toString() { return longTestString; }
     });
     assertEquals("4567890123456"+longTestString, t.toString());

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/document/TestDocument.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/document/TestDocument.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/document/TestDocument.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/document/TestDocument.java Wed Feb  9 09:35:27 2011
@@ -156,7 +156,7 @@ public class TestDocument extends Lucene
     writer.addDocument(makeDocumentWithFields());
     IndexReader reader = writer.getReader();
     
-    IndexSearcher searcher = new IndexSearcher(reader);
+    IndexSearcher searcher = newSearcher(reader);
     
     // search for something that does exists
     Query query = new TermQuery(new Term("keyword", "test1"));
@@ -238,7 +238,7 @@ public class TestDocument extends Lucene
     writer.addDocument(doc);
     
     IndexReader reader = writer.getReader();
-    IndexSearcher searcher = new IndexSearcher(reader);
+    IndexSearcher searcher = newSearcher(reader);
     
     Query query = new TermQuery(new Term("keyword", "test"));
     

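Note on the TestDocument hunks above (the same substitution recurs throughout this commit): newSearcher(reader) is the LuceneTestCase helper that replaces direct new IndexSearcher(reader). A usage sketch, assuming the helper behaves as on trunk, where it may randomly wrap the searcher (for example with an executor) for extra coverage:

    IndexSearcher searcher = newSearcher(reader);  // may wrap reader or use threads
    try {
      TopDocs hits = searcher.search(new TermQuery(new Term("keyword", "test1")), 10);
      assertEquals(1, hits.totalHits);
    } finally {
      searcher.close();  // also releases anything the helper set up
    }
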
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Wed Feb  9 09:35:27 2011
@@ -55,6 +55,7 @@ public class TestAddIndexes extends Luce
     writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer())
         .setOpenMode(OpenMode.CREATE));
+    writer.setInfoStream(VERBOSE ? System.out : null);
     // add 100 documents
     addDocs(writer, 100);
     assertEquals(100, writer.maxDoc());
@@ -156,6 +157,7 @@ public class TestAddIndexes extends Luce
 
     setUpDirs(dir, aux);
     IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
+    writer.setInfoStream(VERBOSE ? System.out : null);
     writer.addIndexes(aux);
 
     // Adds 10 docs, then replaces them with another 10
@@ -427,7 +429,7 @@ public class TestAddIndexes extends Luce
     );
 
     writer.addIndexes(aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)));
-    assertEquals(1060, writer.maxDoc());
+    assertEquals(1020, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
     dir.close();
@@ -451,6 +453,7 @@ public class TestAddIndexes extends Luce
             setMaxBufferedDocs(100).
             setMergePolicy(newLogMergePolicy(10))
     );
+    writer.setInfoStream(VERBOSE ? System.out : null);
     writer.addIndexes(aux);
     assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
@@ -479,7 +482,7 @@ public class TestAddIndexes extends Luce
     );
 
     writer.addIndexes(aux, aux2);
-    assertEquals(1060, writer.maxDoc());
+    assertEquals(1040, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
     dir.close();

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java Wed Feb  9 09:35:27 2011
@@ -131,6 +131,7 @@ public class TestAtomicUpdate extends Lu
         .setMaxBufferedDocs(7);
     ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(3);
     IndexWriter writer = new MockIndexWriter(directory, conf);
+    writer.setInfoStream(VERBOSE ? System.out : null);
 
     // Establish a base index of 100 docs:
     for(int i=0;i<100;i++) {

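Note on the TestAtomicUpdate hunk above: its only change is the idiom this commit adds across many tests, routing IndexWriter's diagnostic log to stdout only when the suite runs verbosely (LuceneTestCase.VERBOSE, i.e. -Dtests.verbose=true):

    writer.setInfoStream(VERBOSE ? System.out : null);  // trace flushes/merges on demand
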
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Wed Feb  9 09:35:27 2011
@@ -17,20 +17,13 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
-import java.io.BufferedOutputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
 import java.io.PrintStream;
 import java.util.Arrays;
-import java.util.Enumeration;
 import java.util.List;
 import java.util.Random;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
@@ -38,11 +31,13 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.document.NumericField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.SimilarityProvider;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
@@ -76,39 +71,6 @@ public class TestBackwardsCompatibility 
   }
   */
 
-  /* Unzips zipName --> dirName, removing dirName
-     first */
-  public void unzip(File zipName, String destDirName) throws IOException {
-
-    ZipFile zipFile = new ZipFile(zipName);
-
-    Enumeration<? extends ZipEntry> entries = zipFile.entries();
-
-    String dirName = fullDir(destDirName);
-
-    File fileDir = new File(dirName);
-    rmDir(destDirName);
-
-    fileDir.mkdir();
-
-    while (entries.hasMoreElements()) {
-      ZipEntry entry = entries.nextElement();
-
-      InputStream in = zipFile.getInputStream(entry);
-      OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(fileDir, entry.getName())));
-
-      byte[] buffer = new byte[8192];
-      int len;
-      while((len = in.read(buffer)) >= 0) {
-        out.write(buffer, 0, len);
-      }
-
-      in.close();
-      out.close();
-    }
-
-    zipFile.close();
-  }
 /*
   public void testCreateCFS() throws IOException {
     String dirName = "testindex.cfs";
@@ -151,10 +113,9 @@ public class TestBackwardsCompatibility 
       if (VERBOSE) {
         System.out.println("TEST: index " + unsupportedNames[i]);
       }
-      unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), unsupportedNames[i]);
-
-      String fullPath = fullDir(unsupportedNames[i]);
-      Directory dir = newFSDirectory(new File(fullPath));
+      File oldIndexDir = _TestUtil.getTempDir(unsupportedNames[i]);
+      _TestUtil.unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
 
       IndexReader reader = null;
       IndexWriter writer = null;
@@ -170,15 +131,7 @@ public class TestBackwardsCompatibility 
 
       try {
         writer = new IndexWriter(dir, newIndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer())
-          .setMergeScheduler(new SerialMergeScheduler()) // no threads!
-        );
-        // TODO: Make IndexWriter fail on open!
-        if (random.nextBoolean()) {
-          writer.optimize();
-        } else {
-          reader = writer.getReader();
-        }
+          TEST_VERSION_CURRENT, new MockAnalyzer()));
         fail("IndexWriter creation should not pass for "+unsupportedNames[i]);
       } catch (IndexFormatTooOldException e) {
         // pass
@@ -187,17 +140,13 @@ public class TestBackwardsCompatibility 
           e.printStackTrace(System.out);
         }
       } finally {
-        if (reader != null) reader.close();
-        reader = null;
+        // we should fail to open IW, and so it should be null when we get here.
+        // However, if the test fails (i.e., IW did not fail on open), we need
+        // to close IW. But if merges run on close, IW may throw
+        // IndexFormatTooOldException, and we don't want to mask the fail()
+        // above, so close without waiting for merges.
         if (writer != null) {
-          try {
-            writer.close();
-          } catch (IndexFormatTooOldException e) {
-            // OK -- since IW gives merge scheduler a chance
-            // to merge at close, it's possible and fine to
-            // hit this exc here
-            writer.close(false);
-          }
+          writer.close(false);
         }
         writer = null;
       }
@@ -210,34 +159,37 @@ public class TestBackwardsCompatibility 
       assertTrue(bos.toString().contains(IndexFormatTooOldException.class.getName()));
 
       dir.close();
-      rmDir(unsupportedNames[i]);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
   
   public void testOptimizeOldIndex() throws Exception {
     for(int i=0;i<oldNames.length;i++) {
-      unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
-
-      String fullPath = fullDir(oldNames[i]);
-      Directory dir = newFSDirectory(new File(fullPath));
+      if (VERBOSE) {
+        System.out.println("\nTEST: index=" + oldNames[i]);
+      }
+      File oldIndexDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
 
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer()));
+      w.setInfoStream(VERBOSE ? System.out : null);
       w.optimize();
       w.close();
 
       _TestUtil.checkIndex(dir);
       
       dir.close();
-      rmDir(oldNames[i]);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
 
   public void testAddOldIndexes() throws IOException {
     for (String name : oldNames) {
-      unzip(getDataFile("index." + name + ".zip"), name);
-      String fullPath = fullDir(name);
-      Directory dir = newFSDirectory(new File(fullPath));
+      File oldIndexDir = _TestUtil.getTempDir(name);
+      _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
 
       Directory targetDir = newDirectory();
       IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig(
@@ -249,15 +201,15 @@ public class TestBackwardsCompatibility 
       
       dir.close();
       targetDir.close();
-      rmDir(name);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
 
   public void testAddOldIndexesReader() throws IOException {
     for (String name : oldNames) {
-      unzip(getDataFile("index." + name + ".zip"), name);
-      String fullPath = fullDir(name);
-      Directory dir = newFSDirectory(new File(fullPath));
+      File oldIndexDir = _TestUtil.getTempDir(name);
+      _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
       IndexReader reader = IndexReader.open(dir);
       
       Directory targetDir = newDirectory();
@@ -271,23 +223,25 @@ public class TestBackwardsCompatibility 
       
       dir.close();
       targetDir.close();
-      rmDir(name);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
 
   public void testSearchOldIndex() throws IOException {
     for(int i=0;i<oldNames.length;i++) {
-      unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
-      searchIndex(oldNames[i], oldNames[i]);
-      rmDir(oldNames[i]);
+      File oldIndexDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndexDir);
+      searchIndex(oldIndexDir, oldNames[i]);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
 
   public void testIndexOldIndexNoAdds() throws IOException {
     for(int i=0;i<oldNames.length;i++) {
-      unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
-      changeIndexNoAdds(random, oldNames[i]);
-      rmDir(oldNames[i]);
+      File oldIndexDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndexDir);
+      changeIndexNoAdds(random, oldIndexDir);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
 
@@ -296,9 +250,10 @@ public class TestBackwardsCompatibility 
       if (VERBOSE) {
         System.out.println("TEST: oldName=" + oldNames[i]);
       }
-      unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
-      changeIndexWithAdds(random, oldNames[i]);
-      rmDir(oldNames[i]);
+      File oldIndexDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndexDir);
+      changeIndexWithAdds(random, oldIndexDir, oldNames[i]);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
 
@@ -311,13 +266,11 @@ public class TestBackwardsCompatibility 
     }
   }
 
-  public void searchIndex(String dirName, String oldName) throws IOException {
+  public void searchIndex(File indexDir, String oldName) throws IOException {
     //QueryParser parser = new QueryParser("contents", new MockAnalyzer());
     //Query query = parser.parse("handle:1");
 
-    dirName = fullDir(dirName);
-
-    Directory dir = newFSDirectory(new File(dirName));
+    Directory dir = newFSDirectory(indexDir);
     IndexSearcher searcher = new IndexSearcher(dir, true);
     IndexReader reader = searcher.getIndexReader();
 
@@ -349,7 +302,7 @@ public class TestBackwardsCompatibility 
         }
 
         TermFreqVector tfv = reader.getTermFreqVector(i, "utf8");
-        assertNotNull("docID=" + i + " index=" + dirName, tfv);
+        assertNotNull("docID=" + i + " index=" + indexDir.getName(), tfv);
         assertTrue(tfv instanceof TermPositionVector);
       } else
         // Only ID 7 is deleted
@@ -382,11 +335,9 @@ public class TestBackwardsCompatibility 
     return v0 - v1;
   }
 
-  public void changeIndexWithAdds(Random random, String dirName) throws IOException {
-    String origDirName = dirName;
-    dirName = fullDir(dirName);
+  public void changeIndexWithAdds(Random random, File oldIndexDir, String origOldName) throws IOException {
 
-    Directory dir = newFSDirectory(new File(dirName));
+    Directory dir = newFSDirectory(oldIndexDir);
     // open writer
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
     writer.setInfoStream(VERBOSE ? System.out : null);
@@ -397,7 +348,7 @@ public class TestBackwardsCompatibility 
 
     // make sure writer sees right total -- writer seems not to know about deletes in .del?
     final int expected;
-    if (compare(origDirName, "24") < 0) {
+    if (compare(origOldName, "24") < 0) {
       expected = 44;
     } else {
       expected = 45;
@@ -415,11 +366,11 @@ public class TestBackwardsCompatibility 
 
     // make sure we can do delete & setNorm against this segment:
     IndexReader reader = IndexReader.open(dir, false);
-    searcher = new IndexSearcher(reader);
+    searcher = newSearcher(reader);
     Term searchTerm = new Term("id", "6");
     int delCount = reader.deleteDocuments(searchTerm);
     assertEquals("wrong delete count", 1, delCount);
-    reader.setNorm(searcher.search(new TermQuery(new Term("id", "22")), 10).scoreDocs[0].doc, "content", (float) 2.0);
+    reader.setNorm(searcher.search(new TermQuery(new Term("id", "22")), 10).scoreDocs[0].doc, "content", searcher.getSimilarityProvider().get("content").encodeNormValue(2.0f));
     reader.close();
     searcher.close();
 
@@ -448,11 +399,9 @@ public class TestBackwardsCompatibility 
     dir.close();
   }
 
-  public void changeIndexNoAdds(Random random, String dirName) throws IOException {
-
-    dirName = fullDir(dirName);
+  public void changeIndexNoAdds(Random random, File oldIndexDir) throws IOException {
 
-    Directory dir = newFSDirectory(new File(dirName));
+    Directory dir = newFSDirectory(oldIndexDir);
 
     // make sure searching sees right # hits
     IndexSearcher searcher = new IndexSearcher(dir, true);
@@ -467,7 +416,7 @@ public class TestBackwardsCompatibility 
     Term searchTerm = new Term("id", "6");
     int delCount = reader.deleteDocuments(searchTerm);
     assertEquals("wrong delete count", 1, delCount);
-    reader.setNorm(22, "content", (float) 2.0);
+    reader.setNorm(22, "content", searcher.getSimilarityProvider().get("content").encodeNormValue(2.0f));
     reader.close();
 
     // make sure they "took":
@@ -495,13 +444,12 @@ public class TestBackwardsCompatibility 
     dir.close();
   }
 
-  public void createIndex(Random random, String dirName, boolean doCFS) throws IOException {
+  public File createIndex(Random random, String dirName, boolean doCFS) throws IOException {
 
-    rmDir(dirName);
-
-    dirName = fullDir(dirName);
-
-    Directory dir = newFSDirectory(new File(dirName));
+    File indexDir = _TestUtil.getTempDir(dirName);
+    _TestUtil.rmDir(indexDir);
+    Directory dir = newFSDirectory(indexDir);
+    
     IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
     ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(doCFS);
     IndexWriter writer = new IndexWriter(dir, conf);
@@ -526,19 +474,23 @@ public class TestBackwardsCompatibility 
     assertEquals("didn't delete the right number of documents", 1, delCount);
 
     // Set one norm so we get a .s0 file:
-    reader.setNorm(21, "content", (float) 1.5);
+    reader.setNorm(21, "content", conf.getSimilarityProvider().get("content").encodeNormValue(1.5f));
     reader.close();
+    dir.close();
+    
+    return indexDir;
   }
 
   /* Verifies that the expected file names were produced */
 
   public void testExactFileNames() throws IOException {
 
-    String outputDir = "lucene.backwardscompat0.index";
-    rmDir(outputDir);
+    String outputDirName = "lucene.backwardscompat0.index";
+    File outputDir = _TestUtil.getTempDir(outputDirName);
+    _TestUtil.rmDir(outputDir);
 
     try {
-      Directory dir = newFSDirectory(new File(fullDir(outputDir)));
+      Directory dir = newFSDirectory(outputDir);
 
       LogMergePolicy mergePolicy = newLogMergePolicy(true, 10);
       mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
@@ -563,7 +515,8 @@ public class TestBackwardsCompatibility 
       assertEquals("didn't delete the right number of documents", 1, delCount);
 
       // Set one norm so we get a .s0 file:
-      reader.setNorm(21, "content", (float) 1.5);
+      SimilarityProvider sim = new DefaultSimilarity();
+      reader.setNorm(21, "content", sim.get("content").encodeNormValue(1.5f));
       reader.close();
 
       // The numbering of fields can vary depending on which
@@ -600,7 +553,7 @@ public class TestBackwardsCompatibility 
       }
       dir.close();
     } finally {
-      rmDir(outputDir);
+      _TestUtil.rmDir(outputDir);
     }
   }
 
@@ -641,23 +594,6 @@ public class TestBackwardsCompatibility 
     writer.addDocument(doc);
   }
 
-  private void rmDir(String dir) throws IOException {
-    File fileDir = new File(fullDir(dir));
-    if (fileDir.exists()) {
-      File[] files = fileDir.listFiles();
-      if (files != null) {
-        for (int i = 0; i < files.length; i++) {
-          files[i].delete();
-        }
-      }
-      fileDir.delete();
-    }
-  }
-
-  public static String fullDir(String dirName) throws IOException {
-    return new File(TEMP_DIR, dirName).getCanonicalPath();
-  }
-
   private int countDocs(DocsEnum docs) throws IOException {
     int count = 0;
     while((docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
@@ -669,9 +605,9 @@ public class TestBackwardsCompatibility 
   // flex: test basics of TermsEnum api on non-flex index
   public void testNextIntoWrongField() throws Exception {
     for(int i=0;i<oldNames.length;i++) {
-      unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
-      String fullPath = fullDir(oldNames[i]);
-      Directory dir = newFSDirectory(new File(fullPath));
+      File oldIndexDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
       IndexReader r = IndexReader.open(dir);
       TermsEnum terms = MultiFields.getFields(r).terms("content").iterator();
       BytesRef t = terms.next();
@@ -708,16 +644,16 @@ public class TestBackwardsCompatibility 
 
       r.close();
       dir.close();
-      rmDir(oldNames[i]);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
   
   public void testNumericFields() throws Exception {
     for(int i=0;i<oldNames.length;i++) {
       
-      unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
-      String fullPath = fullDir(oldNames[i]);
-      Directory dir = newFSDirectory(new File(fullPath));
+      File oldIndexDir = _TestUtil.getTempDir(oldNames[i]);
+      _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndexDir);
+      Directory dir = newFSDirectory(oldIndexDir);
       IndexSearcher searcher = new IndexSearcher(dir, true);
       
       for (int id=10; id<15; id++) {
@@ -752,7 +688,7 @@ public class TestBackwardsCompatibility 
       
       searcher.close();
       dir.close();
-      rmDir(oldNames[i]);
+      _TestUtil.rmDir(oldIndexDir);
     }
   }
 

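Note on the TestBackwardsCompatibility hunks above: besides moving temp-index handling to _TestUtil (getTempDir/unzip/rmDir), the substantive API change is that IndexReader.setNorm no longer takes a raw float. Norms are stored as a single encoded byte, so callers encode through the field's Similarity, mirroring the updated calls:

    SimilarityProvider provider = new DefaultSimilarity();  // as in the hunk above
    byte norm = provider.get("content").encodeNormValue(1.5f);
    reader.setNorm(21, "content", norm);  // encoded byte instead of float
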
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestCodecs.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestCodecs.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestCodecs.java Wed Feb  9 09:35:27 2011
@@ -31,6 +31,7 @@ import org.apache.lucene.index.codecs.Fi
 import org.apache.lucene.index.codecs.FieldsProducer;
 import org.apache.lucene.index.codecs.PostingsConsumer;
 import org.apache.lucene.index.codecs.TermsConsumer;
+import org.apache.lucene.index.codecs.TermStats;
 import org.apache.lucene.index.codecs.mocksep.MockSepCodec;
 import org.apache.lucene.index.codecs.preflex.PreFlexCodec;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -98,9 +99,11 @@ public class TestCodecs extends LuceneTe
     public void write(final FieldsConsumer consumer) throws Throwable {
       Arrays.sort(terms);
       final TermsConsumer termsConsumer = consumer.addField(fieldInfo);
-      for (final TermData term : terms)
-        term.write(termsConsumer);
-      termsConsumer.finish();
+      long sumTotalTermFreq = 0;
+      for (final TermData term : terms) {
+        sumTotalTermFreq += term.write(termsConsumer);
+      }
+      termsConsumer.finish(sumTotalTermFreq);
     }
   }
 
@@ -132,8 +135,9 @@ public class TestCodecs extends LuceneTe
       return text.compareTo(((TermData) o).text);
     }
 
-    public void write(final TermsConsumer termsConsumer) throws Throwable {
+    public long write(final TermsConsumer termsConsumer) throws Throwable {
       final PostingsConsumer postingsConsumer = termsConsumer.startTerm(text);
+      long totTF = 0;
       for(int i=0;i<docs.length;i++) {
         final int termDocFreq;
         if (field.omitTF) {
@@ -143,6 +147,7 @@ public class TestCodecs extends LuceneTe
         }
         postingsConsumer.startDoc(docs[i], termDocFreq);
         if (!field.omitTF) {
+          totTF += positions[i].length;
           for(int j=0;j<positions[i].length;j++) {
             final PositionData pos = positions[i][j];
             postingsConsumer.addPosition(pos.pos, pos.payload);
@@ -150,7 +155,8 @@ public class TestCodecs extends LuceneTe
           postingsConsumer.finishDoc();
         }
       }
-      termsConsumer.finishTerm(text, docs.length);
+      termsConsumer.finishTerm(text, new TermStats(docs.length, totTF));
+      return totTF;
     }
   }
 
@@ -357,7 +363,7 @@ public class TestCodecs extends LuceneTe
 
   private ScoreDoc[] search(final IndexWriter writer, final Query q, final int n) throws IOException {
     final IndexReader reader = writer.getReader();
-    final IndexSearcher searcher = new IndexSearcher(reader);
+    final IndexSearcher searcher = newSearcher(reader);
     try {
       return searcher.search(q, null, n).scoreDocs;
     }
@@ -584,7 +590,8 @@ public class TestCodecs extends LuceneTe
 
     final int termIndexInterval = _TestUtil.nextInt(random, 13, 27);
     final SegmentCodecs codecInfo = SegmentCodecs.build(fieldInfos, CodecProvider.getDefault());
-    final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo, new AtomicLong());
+    final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo, null, new AtomicLong(0));
+
     final FieldsConsumer consumer = state.segmentCodecs.codec().fieldsConsumer(state);
     Arrays.sort(fields);
     for (final FieldData field : fields) {

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Wed Feb  9 09:35:27 2011
@@ -132,11 +132,15 @@ public class TestConcurrentMergeSchedule
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer())
         .setMergePolicy(mp));
+    writer.setInfoStream(VERBOSE ? System.out : null);
 
     Document doc = new Document();
     Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
     for(int i=0;i<10;i++) {
+      if (VERBOSE) {
+        System.out.println("\nTEST: cycle");
+      }
       for(int j=0;j<100;j++) {
         idField.setValue(Integer.toString(i*100+j));
         writer.addDocument(doc);
@@ -144,6 +148,9 @@ public class TestConcurrentMergeSchedule
 
       int delID = i;
       while(delID < 100*(1+i)) {
+        if (VERBOSE) {
+          System.out.println("TEST: del " + delID);
+        }
         writer.deleteDocuments(new Term("id", ""+delID));
         delID += 10;
       }

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java Wed Feb  9 09:35:27 2011
@@ -119,6 +119,9 @@ public class TestDeletionPolicy extends 
     }
 
     public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      if (VERBOSE) {
+        System.out.println("TEST: onInit");
+      }
       verifyCommitOrder(commits);
       numOnInit++;
       // do no deletions on init
@@ -126,6 +129,9 @@ public class TestDeletionPolicy extends 
     }
 
     public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+      if (VERBOSE) {
+        System.out.println("TEST: onCommit");
+      }
       verifyCommitOrder(commits);
       doDeletes(commits, true);
     }
@@ -200,8 +206,10 @@ public class TestDeletionPolicy extends 
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer())
         .setIndexDeletionPolicy(policy);
-    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-    lmp.setUseCompoundFile(true);
+    MergePolicy mp = conf.getMergePolicy();
+    if (mp instanceof LogMergePolicy) {
+      ((LogMergePolicy) mp).setUseCompoundFile(true);
+    }
     IndexWriter writer = new IndexWriter(dir, conf);
     writer.close();
 
@@ -215,8 +223,10 @@ public class TestDeletionPolicy extends 
       conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
           new MockAnalyzer()).setOpenMode(
           OpenMode.APPEND).setIndexDeletionPolicy(policy);
-      lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(true);
+      mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(true);
+      }
       writer = new IndexWriter(dir, conf);
       for(int j=0;j<17;j++) {
         addDoc(writer);
@@ -280,6 +290,10 @@ public class TestDeletionPolicy extends 
   public void testKeepAllDeletionPolicy() throws IOException {
     for(int pass=0;pass<2;pass++) {
 
+      if (VERBOSE) {
+        System.out.println("TEST: cycle pass=" + pass);
+      }
+
       boolean useCompoundFile = (pass % 2) != 0;
 
       // Never deletes a commit
@@ -292,34 +306,48 @@ public class TestDeletionPolicy extends 
           TEST_VERSION_CURRENT, new MockAnalyzer())
           .setIndexDeletionPolicy(policy).setMaxBufferedDocs(10)
           .setMergeScheduler(new SerialMergeScheduler());
-      LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFile);
-      lmp.setMergeFactor(10);
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+      }
       IndexWriter writer = new IndexWriter(dir, conf);
       for(int i=0;i<107;i++) {
         addDoc(writer);
       }
       writer.close();
 
-      conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
-          new MockAnalyzer()).setOpenMode(
-          OpenMode.APPEND).setIndexDeletionPolicy(policy);
-      lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFile);
-      writer = new IndexWriter(dir, conf);
-      writer.optimize();
-      writer.close();
-
-      assertEquals(1, policy.numOnInit);
+      final boolean isOptimized;
+      {
+        IndexReader r = IndexReader.open(dir);
+        isOptimized = r.isOptimized();
+        r.close();
+      }
+      if (!isOptimized) {
+        conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
+            new MockAnalyzer()).setOpenMode(OpenMode.APPEND)
+            .setIndexDeletionPolicy(policy);
+        mp = conf.getMergePolicy();
+        if (mp instanceof LogMergePolicy) {
+          ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+        }
+        if (VERBOSE) {
+          System.out.println("TEST: open writer for optimize");
+        }
+        writer = new IndexWriter(dir, conf);
+        writer.setInfoStream(VERBOSE ? System.out : null);
+        writer.optimize();
+        writer.close();
+      }
+      assertEquals(isOptimized ? 0:1, policy.numOnInit);
 
       // If we are not auto committing then there should
       // be exactly 2 commits (one per close above):
-      assertEquals(2, policy.numOnCommit);
+      assertEquals(1 + (isOptimized ? 0:1), policy.numOnCommit);
 
       // Test listCommits
       Collection<IndexCommit> commits = IndexReader.listCommits(dir);
       // 2 from closing writer
-      assertEquals(2, commits.size());
+      assertEquals(1 + (isOptimized ? 0:1), commits.size());
 
       // Make sure we can open a reader on each commit:
       for (final IndexCommit commit : commits) {
@@ -480,8 +508,10 @@ public class TestDeletionPolicy extends 
           TEST_VERSION_CURRENT, new MockAnalyzer())
           .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
           .setMaxBufferedDocs(10);
-      LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFile);
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+      }
       IndexWriter writer = new IndexWriter(dir, conf);
       for(int i=0;i<107;i++) {
         addDoc(writer);
@@ -490,8 +520,10 @@ public class TestDeletionPolicy extends 
 
       conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
           .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
-      lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFile);
+      mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+      }
       writer = new IndexWriter(dir, conf);
       writer.optimize();
       writer.close();
@@ -529,8 +561,10 @@ public class TestDeletionPolicy extends 
             TEST_VERSION_CURRENT, new MockAnalyzer())
             .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
             .setMaxBufferedDocs(10);
-        LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-        lmp.setUseCompoundFile(useCompoundFile);
+        MergePolicy mp = conf.getMergePolicy();
+        if (mp instanceof LogMergePolicy) {
+          ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+        }
         IndexWriter writer = new IndexWriter(dir, conf);
         for(int i=0;i<17;i++) {
           addDoc(writer);
@@ -585,47 +619,65 @@ public class TestDeletionPolicy extends 
       Directory dir = newDirectory();
       IndexWriterConfig conf = newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer())
-          .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy);
-      LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFile);
+        .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy).setMergePolicy(newInOrderLogMergePolicy());
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+      }
       IndexWriter writer = new IndexWriter(dir, conf);
       writer.close();
       Term searchTerm = new Term("content", "aaa");        
       Query query = new TermQuery(searchTerm);
 
       for(int i=0;i<N+1;i++) {
+        if (VERBOSE) {
+          System.out.println("\nTEST: cycle i=" + i);
+        }
         conf = newIndexWriterConfig(
             TEST_VERSION_CURRENT, new MockAnalyzer())
             .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
-        lmp = (LogMergePolicy) conf.getMergePolicy();
-        lmp.setUseCompoundFile(useCompoundFile);
+        mp = conf.getMergePolicy();
+        if (mp instanceof LogMergePolicy) {
+          ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+        }
         writer = new IndexWriter(dir, conf);
         for(int j=0;j<17;j++) {
           addDoc(writer);
         }
         // this is a commit
+        if (VERBOSE) {
+          System.out.println("TEST: close writer");
+        }
         writer.close();
         IndexReader reader = IndexReader.open(dir, policy, false);
         reader.deleteDocument(3*i+1);
-        reader.setNorm(4*i+1, "content", 2.0F);
-        IndexSearcher searcher = new IndexSearcher(reader);
+        reader.setNorm(4*i+1, "content", conf.getSimilarityProvider().get("content").encodeNormValue(2.0F));
+        IndexSearcher searcher = newSearcher(reader);
         ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
         assertEquals(16*(1+i), hits.length);
         // this is a commit
+        if (VERBOSE) {
+          System.out.println("TEST: close reader numOnCommit=" + policy.numOnCommit);
+        }
         reader.close();
         searcher.close();
       }
       conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
           .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
-      lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFile);
+      mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+      }
+      IndexReader r = IndexReader.open(dir);
+      final boolean wasOptimized = r.isOptimized();
+      r.close();
       writer = new IndexWriter(dir, conf);
       writer.optimize();
       // this is a commit
       writer.close();
 
       assertEquals(2*(N+1)+1, policy.numOnInit);
-      assertEquals(2*(N+2), policy.numOnCommit);
+      assertEquals(2*(N+2) - (wasOptimized ? 1:0), policy.numOnCommit);
 
       IndexSearcher searcher = new IndexSearcher(dir, false);
       ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -644,7 +696,7 @@ public class TestDeletionPolicy extends 
 
           // Work backwards in commits on what the expected
           // count should be.
-          searcher = new IndexSearcher(reader);
+          searcher = newSearcher(reader);
           hits = searcher.search(query, null, 1000).scoreDocs;
           if (i > 1) {
             if (i % 2 == 0) {
@@ -692,8 +744,10 @@ public class TestDeletionPolicy extends 
           TEST_VERSION_CURRENT, new MockAnalyzer())
           .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
           .setMaxBufferedDocs(10);
-      LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-      lmp.setUseCompoundFile(useCompoundFile);
+      MergePolicy mp = conf.getMergePolicy();
+      if (mp instanceof LogMergePolicy) {
+        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+      }
       IndexWriter writer = new IndexWriter(dir, conf);
       writer.close();
       Term searchTerm = new Term("content", "aaa");        
@@ -705,8 +759,10 @@ public class TestDeletionPolicy extends 
             TEST_VERSION_CURRENT, new MockAnalyzer())
             .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy)
             .setMaxBufferedDocs(10);
-        lmp = (LogMergePolicy) conf.getMergePolicy();
-        lmp.setUseCompoundFile(useCompoundFile);
+        mp = conf.getMergePolicy();
+        if (mp instanceof LogMergePolicy) {
+          ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+        }
         writer = new IndexWriter(dir, conf);
         for(int j=0;j<17;j++) {
           addDoc(writer);
@@ -715,8 +771,8 @@ public class TestDeletionPolicy extends 
         writer.close();
         IndexReader reader = IndexReader.open(dir, policy, false);
         reader.deleteDocument(3);
-        reader.setNorm(5, "content", 2.0F);
-        IndexSearcher searcher = new IndexSearcher(reader);
+        reader.setNorm(5, "content", conf.getSimilarityProvider().get("content").encodeNormValue(2.0F));
+        IndexSearcher searcher = newSearcher(reader);
         ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
         assertEquals(16, hits.length);
         // this is a commit
@@ -751,7 +807,7 @@ public class TestDeletionPolicy extends 
 
           // Work backwards in commits on what the expected
           // count should be.
-          searcher = new IndexSearcher(reader);
+          searcher = newSearcher(reader);
           hits = searcher.search(query, null, 1000).scoreDocs;
           assertEquals(expectedCount, hits.length);
           searcher.close();
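
Note on the recurring pattern in the TestDeletionPolicy hunks above: each one replaces an unconditional (LogMergePolicy) cast with an instanceof guard, because the randomized newIndexWriterConfig() may hand back a MergePolicy that is not a LogMergePolicy. A minimal sketch of the idiom as a reusable helper (the helper name is hypothetical, not part of this commit):

    // Only LogMergePolicy exposes setUseCompoundFile; under a randomized
    // config the cast may not apply, so skip silently in that case.
    static void trySetUseCompoundFile(IndexWriterConfig conf, boolean useCompoundFile) {
      MergePolicy mp = conf.getMergePolicy();
      if (mp instanceof LogMergePolicy) {
        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
      }
    }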

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java Wed Feb  9 09:35:27 2011
@@ -51,7 +51,7 @@ public class TestFieldsReader extends Lu
     DocHelper.setupDoc(testDoc);
     fieldInfos.add(testDoc);
     dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy());
     ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false);
     IndexWriter writer = new IndexWriter(dir, conf);
     writer.addDocument(testDoc);
@@ -291,7 +291,7 @@ public class TestFieldsReader extends Lu
     Directory tmpDir = newFSDirectory(file);
     assertTrue(tmpDir != null);
 
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE);
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMergePolicy(newLogMergePolicy());
     ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false);
     IndexWriter writer = new IndexWriter(tmpDir, conf);
     writer.addDocument(testDoc);
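
TestFieldsReader takes the opposite approach: these tests depend on LogMergePolicy behavior (compound files disabled), so the config now pins the policy via setMergePolicy(newLogMergePolicy()) before casting, rather than guarding the cast. A sketch of the idiom, assuming the test framework's newLogMergePolicy() always returns a LogMergePolicy:

    // Pinning the policy makes the unconditional cast safe even when
    // the rest of the config is randomized.
    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
        .setMergePolicy(newLogMergePolicy());
    ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false);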

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java Wed Feb  9 09:35:27 2011
@@ -38,9 +38,11 @@ public class TestFilterIndexReader exten
       TestFields(Fields in) {
         super(in);
       }
+      @Override
       public FieldsEnum iterator() throws IOException {
         return new TestFieldsEnum(super.iterator());
       }
+      @Override
       public Terms terms(String field) throws IOException {
         return new TestTerms(super.terms(field));
       }
@@ -51,6 +53,7 @@ public class TestFilterIndexReader exten
         super(in);
       }
 
+      @Override
       public TermsEnum iterator() throws IOException {
         return new TestTermsEnum(super.iterator());
       }
@@ -61,6 +64,7 @@ public class TestFilterIndexReader exten
         super(in);
       }
 
+      @Override
       public TermsEnum terms() throws IOException {
         return new TestTermsEnum(super.terms());
       }
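
The TestFilterIndexReader hunks only add @Override to the delegating methods; for wrapper classes like these the annotation turns a stale override into a compile error if the wrapped API's signature ever changes. A generic illustration (Base, Guarded, and Unguarded are hypothetical, not code from this commit):

    class Base { int size() { return 1; } }
    class Guarded extends Base {
      @Override int size() { return 0; }  // breaks at compile time if Base.size() is renamed
    }
    class Unguarded extends Base {
      int sizee() { return 0; }           // typo: compiles, but is silently never called
    }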

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Wed Feb  9 09:35:27 2011
@@ -18,6 +18,8 @@ package org.apache.lucene.index;
  */
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
@@ -66,9 +68,9 @@ public class TestIndexFileDeleter extend
     Term searchTerm = new Term("id", "7");
     int delCount = reader.deleteDocuments(searchTerm);
     assertEquals("didn't delete the right number of documents", 1, delCount);
-
+    Similarity sim = new DefaultSimilarity().get("content");
     // Set one norm so we get a .s0 file:
-    reader.setNorm(21, "content", (float) 1.5);
+    reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
     reader.close();
 
     // Now, artificially create an extra .del file & extra
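
The setNorm changes here and throughout this commit reflect the norms API on this branch: callers now pass the encoded norm byte, obtained from the field's Similarity, instead of a raw float. A sketch of the round trip (values are illustrative):

    Similarity sim = new DefaultSimilarity().get("content");
    byte b = sim.encodeNormValue(1.5f);      // float -> single norm byte (lossy)
    reader.setNorm(21, "content", b);        // was: reader.setNorm(21, "content", 1.5f)
    float approx = sim.decodeNormValue(b);   // decoding recovers only an approximation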

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexReader.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexReader.java Wed Feb  9 09:35:27 2011
@@ -29,7 +29,7 @@ import java.util.Map;
 import java.util.HashMap;
 import java.util.Set;
 import java.util.SortedSet;
-
+import org.junit.Assume;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -39,8 +39,10 @@ import org.apache.lucene.document.SetBas
 import org.apache.lucene.index.IndexReader.FieldOption;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
@@ -329,6 +331,7 @@ public class TestIndexReader extends Luc
 
         //  add 100 documents with term : aaa
         writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+        writer.setInfoStream(VERBOSE ? System.out : null);
         for (int i = 0; i < 100; i++) {
             addDoc(writer, searchTerm.text());
         }
@@ -357,7 +360,7 @@ public class TestIndexReader extends Luc
 
         // CREATE A NEW READER and re-test
         reader = IndexReader.open(dir, false);
-        assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
+        assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm));
         assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
         reader.close();
         reader2.close();
@@ -368,7 +371,7 @@ public class TestIndexReader extends Luc
         Directory dir = newDirectory();
         byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
         
-        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
         
         for (int i = 0; i < 10; i++) {
           addDoc(writer, "document number " + (i + 1));
@@ -377,7 +380,7 @@ public class TestIndexReader extends Luc
           addDocumentWithTermVectorFields(writer);
         }
         writer.close();
-        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
+        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setMergePolicy(newInOrderLogMergePolicy()));
         Document doc = new Document();
         doc.add(new Field("bin1", bin));
         doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
@@ -414,7 +417,7 @@ public class TestIndexReader extends Luc
         // force optimize
 
 
-        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
+        writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setMergePolicy(newInOrderLogMergePolicy()));
         writer.optimize();
         writer.close();
         reader = IndexReader.open(dir, false);
@@ -462,8 +465,9 @@ public class TestIndexReader extends Luc
           // expected
         }
 
+        Similarity sim = new DefaultSimilarity().get("aaa");
         try {
-          reader.setNorm(5, "aaa", 2.0f);
+          reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
           fail("setNorm after close failed to throw IOException");
         } catch (AlreadyClosedException e) {
           // expected
@@ -502,8 +506,9 @@ public class TestIndexReader extends Luc
           // expected
         }
 
+        Similarity sim = new DefaultSimilarity().get("aaa");
         try {
-          reader.setNorm(5, "aaa", 2.0f);
+          reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
           fail("setNorm should have hit LockObtainFailedException");
         } catch (LockObtainFailedException e) {
           // expected
@@ -533,7 +538,8 @@ public class TestIndexReader extends Luc
 
         //  now open reader & set norm for doc 0
         IndexReader reader = IndexReader.open(dir, false);
-        reader.setNorm(0, "content", (float) 2.0);
+        Similarity sim = new DefaultSimilarity().get("content");
+        reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
 
         // we should be holding the write lock now:
         assertTrue("locked", IndexWriter.isLocked(dir));
@@ -547,7 +553,7 @@ public class TestIndexReader extends Luc
         IndexReader reader2 = IndexReader.open(dir, false);
 
         // set norm again for doc 0
-        reader.setNorm(0, "content", (float) 3.0);
+        reader.setNorm(0, "content", sim.encodeNormValue(3.0f));
         assertTrue("locked", IndexWriter.isLocked(dir));
 
         reader.close();
@@ -577,15 +583,16 @@ public class TestIndexReader extends Luc
         addDoc(writer, searchTerm.text());
         writer.close();
 
+        Similarity sim = new DefaultSimilarity().get("content");
         //  now open reader & set norm for doc 0 (writes to
         //  _0_1.s0)
         reader = IndexReader.open(dir, false);
-        reader.setNorm(0, "content", (float) 2.0);
+        reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
         reader.close();
         
         //  now open reader again & set norm for doc 0 (writes to _0_2.s0)
         reader = IndexReader.open(dir, false);
-        reader.setNorm(0, "content", (float) 2.0);
+        reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
         reader.close();
         assertFalse("failed to remove first generation norms file on writing second generation",
                     dir.fileExists("_0_1.s0"));
@@ -690,7 +697,6 @@ public class TestIndexReader extends Luc
 
         // CREATE A NEW READER and re-test
         reader = IndexReader.open(dir, false);
-        assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
         assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
         assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
         assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
@@ -831,7 +837,6 @@ public class TestIndexReader extends Luc
       writer.close();
       IndexReader reader = IndexReader.open(dir, false);
       reader.deleteDocument(0);
-      reader.deleteDocument(1);
       reader.close();
       reader = IndexReader.open(dir, false);
       reader.undeleteAll();
@@ -848,7 +853,6 @@ public class TestIndexReader extends Luc
       writer.close();
       IndexReader reader = IndexReader.open(dir, false);
       reader.deleteDocument(0);
-      reader.deleteDocument(1);
       reader.close();
       reader = IndexReader.open(dir, false);
       reader.undeleteAll();
@@ -880,6 +884,10 @@ public class TestIndexReader extends Luc
       // First build up a starting index:
       MockDirectoryWrapper startDir = newDirectory();
       IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+      if (VERBOSE) {
+        System.out.println("TEST: create initial index");
+        writer.setInfoStream(System.out);
+      }
       for(int i=0;i<157;i++) {
         Document d = new Document();
         d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
@@ -890,6 +898,20 @@ public class TestIndexReader extends Luc
       }
       writer.close();
 
+      {
+        IndexReader r = IndexReader.open(startDir);
+        IndexSearcher searcher = newSearcher(r);
+        ScoreDoc[] hits = null;
+        try {
+          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+        } catch (IOException e) {
+          e.printStackTrace();
+          fail("exception when init searching: " + e);
+        }
+        searcher.close();
+        r.close();
+      }
+
       long diskUsage = startDir.getRecomputedActualSizeInBytes();
       long diskFree = diskUsage+100;
 
@@ -947,13 +969,13 @@ public class TestIndexReader extends Luc
 
           dir.setMaxSizeInBytes(thisDiskFree);
           dir.setRandomIOExceptionRate(rate);
-
+          Similarity sim = new DefaultSimilarity().get("content");
           try {
             if (0 == x) {
               int docId = 12;
               for(int i=0;i<13;i++) {
                 reader.deleteDocument(docId);
-                reader.setNorm(docId, "content", (float) 2.0);
+                reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
                 docId += 12;
               }
             }
@@ -975,14 +997,6 @@ public class TestIndexReader extends Luc
             }
           }
 
-          // Whether we succeeded or failed, check that all
-          // un-referenced files were in fact deleted (ie,
-          // we did not create garbage).  Just create a
-          // new IndexFileDeleter, have it delete
-          // unreferenced files, then verify that in fact
-          // no files were deleted:
-          TestIndexWriter.assertNoUnreferencedFiles(dir, "reader.close() failed to delete unreferenced files");
-
           // Finally, verify index is not corrupt, and, if
           // we succeeded, we see all docs changed, and if
           // we failed, we see either all docs or no docs
@@ -1010,7 +1024,7 @@ public class TestIndexReader extends Luc
           }
           */
 
-          IndexSearcher searcher = new IndexSearcher(newReader);
+          IndexSearcher searcher = newSearcher(newReader);
           ScoreDoc[] hits = null;
           try {
             hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
@@ -1110,8 +1124,9 @@ public class TestIndexReader extends Luc
       }
 
       reader = IndexReader.open(dir, false);
+      Similarity sim = new DefaultSimilarity().get("content");
       try {
-        reader.setNorm(1, "content", (float) 2.0);
+        reader.setNorm(1, "content", sim.encodeNormValue(2.0f));
         fail("did not hit exception when calling setNorm on an invalid doc number");
       } catch (ArrayIndexOutOfBoundsException e) {
         // expected
@@ -1148,7 +1163,7 @@ public class TestIndexReader extends Luc
 
     public void testMultiReaderDeletes() throws Exception {
       Directory dir = newDirectory();
-      RandomIndexWriter w = new RandomIndexWriter(random, dir);
+      RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
       Document doc = new Document();
       doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
       w.addDocument(doc);
@@ -1264,9 +1279,6 @@ public class TestIndexReader extends Luc
 
         // Open another reader to confirm that everything is deleted
         reader2 = IndexReader.open(dir, false);
-        assertEquals("reopened 2", 100, reader2.docFreq(searchTerm1));
-        assertEquals("reopened 2", 100, reader2.docFreq(searchTerm2));
-        assertEquals("reopened 2", 100, reader2.docFreq(searchTerm3));
         assertTermDocsCount("reopened 2", reader2, searchTerm1, 0);
         assertTermDocsCount("reopened 2", reader2, searchTerm2, 0);
         assertTermDocsCount("reopened 2", reader2, searchTerm3, 100);
@@ -1862,4 +1874,65 @@ public class TestIndexReader extends Luc
     assertTrue(IndexReader.indexExists(dir));
     dir.close();
   }
+
+  // Make sure totalTermFreq works correctly in the terms
+  // dict cache
+  public void testTotalTermFreqCached() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    Document d = new Document();
+    d.add(newField("f", "a a b", Field.Index.ANALYZED));
+    writer.addDocument(d);
+    IndexReader r = writer.getReader();
+    writer.close();
+    Terms terms = MultiFields.getTerms(r, "f");
+    try {
+      // Make sure codec impls totalTermFreq (eg PreFlex doesn't)
+      Assume.assumeTrue(terms.totalTermFreq(new BytesRef("b")) != -1);
+      assertEquals(1, terms.totalTermFreq(new BytesRef("b")));
+      assertEquals(2, terms.totalTermFreq(new BytesRef("a")));
+      assertEquals(1, terms.totalTermFreq(new BytesRef("b")));
+    } finally {
+      r.close();
+      dir.close();
+    }
+  }
+
+  // LUCENE-2474
+  public void testReaderFinishedListener() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.addDocument(new Document());
+    writer.commit();
+    writer.addDocument(new Document());
+    writer.commit();
+    final IndexReader reader = writer.getReader();
+    final int[] closeCount = new int[1];
+    final IndexReader.ReaderFinishedListener listener = new IndexReader.ReaderFinishedListener() {
+      public void finished(IndexReader reader) {
+        closeCount[0]++;
+      }
+    };
+
+    reader.addReaderFinishedListener(listener);
+
+    reader.close();
+
+    // Just the top reader
+    assertEquals(1, closeCount[0]);
+    writer.close();
+
+    // Now also the subs
+    assertEquals(3, closeCount[0]);
+
+    IndexReader reader2 = IndexReader.open(dir);
+    reader2.addReaderFinishedListener(listener);
+
+    closeCount[0] = 0;
+    reader2.close();
+    assertEquals(3, closeCount[0]);
+    dir.close();
+  }
 }
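
The new LUCENE-2474 test above pins down the ReaderFinishedListener semantics: closing the top-level reader fires the listener once, and the shared sub-readers fire later, once the writer holding them is closed. A minimal usage sketch (the cache-eviction comment is an assumed use case, not shown in the test):

    IndexReader reader = IndexReader.open(dir);
    reader.addReaderFinishedListener(new IndexReader.ReaderFinishedListener() {
      public void finished(IndexReader r) {
        // Evict any external caches keyed on this (sub-)reader here.
      }
    });
    reader.close();  // fires for the top reader; subs fire when their refs are released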

Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java?rev=1068809&r1=1068808&r2=1068809&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java Wed Feb  9 09:35:27 2011
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
  */
 
 import org.apache.lucene.index.SegmentReader.Norm;
+import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
@@ -272,13 +273,14 @@ public class TestIndexReaderClone extend
    * @throws Exception
    */
   private void performDefaultTests(IndexReader r1) throws Exception {
-    float norm1 = Similarity.getDefault().decodeNormValue(MultiNorms.norms(r1, "field1")[4]);
+    Similarity sim = new DefaultSimilarity().get("field1");
+    float norm1 = sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]);
 
     IndexReader pr1Clone = (IndexReader) r1.clone();
     pr1Clone.deleteDocument(10);
-    pr1Clone.setNorm(4, "field1", 0.5f);
-    assertTrue(Similarity.getDefault().decodeNormValue(MultiNorms.norms(r1, "field1")[4]) == norm1);
-    assertTrue(Similarity.getDefault().decodeNormValue(MultiNorms.norms(pr1Clone, "field1")[4]) != norm1);
+    pr1Clone.setNorm(4, "field1", sim.encodeNormValue(0.5f));
+    assertTrue(sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]) == norm1);
+    assertTrue(sim.decodeNormValue(MultiNorms.norms(pr1Clone, "field1")[4]) != norm1);
 
     final Bits delDocs = MultiFields.getDeletedDocs(r1);
     assertTrue(delDocs == null || !delDocs.get(10));
@@ -327,7 +329,8 @@ public class TestIndexReaderClone extend
     TestIndexReaderReopen.createIndex(random, dir1, false);
     SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
     origSegmentReader.deleteDocument(1);
-    origSegmentReader.setNorm(4, "field1", 0.5f);
+    Similarity sim = new DefaultSimilarity().get("field1");
+    origSegmentReader.setNorm(4, "field1", sim.encodeNormValue(0.5f));
 
     SegmentReader clonedSegmentReader = (SegmentReader) origSegmentReader
         .clone();
@@ -426,8 +429,9 @@ public class TestIndexReaderClone extend
     final Directory dir1 = newDirectory();
     TestIndexReaderReopen.createIndex(random, dir1, false);
     IndexReader orig = IndexReader.open(dir1, false);
-    orig.setNorm(1, "field1", 17.0f);
-    final byte encoded = Similarity.getDefault().encodeNormValue(17.0f);
+    Similarity sim = new DefaultSimilarity().get("field1");
+    orig.setNorm(1, "field1", sim.encodeNormValue(17.0f));
+    final byte encoded = sim.encodeNormValue(17.0f);
     assertEquals(encoded, MultiNorms.norms(orig, "field1")[1]);
 
     // the cloned segmentreader should have 2 references, 1 to itself, and 1 to