Posted to java-commits@lucene.apache.org by us...@apache.org on 2010/04/06 21:19:36 UTC

svn commit: r931278 [9/10] - in /lucene/dev/trunk: lucene/ lucene/backwards/src/ lucene/backwards/src/java/org/apache/lucene/index/ lucene/backwards/src/java/org/apache/lucene/index/codecs/ lucene/backwards/src/java/org/apache/lucene/search/ lucene/bac...

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/UnicodeUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/UnicodeUtil.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/UnicodeUtil.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/UnicodeUtil.java Tue Apr  6 19:19:27 2010
@@ -74,30 +74,20 @@ final public class UnicodeUtil {
   /**
    * @lucene.internal
    */
-  public static final class UTF8Result {
-    public byte[] result = new byte[10];
-    public int length;
-
-    public void setLength(int newLength) {
-      if (result.length < newLength) {
-        result = ArrayUtil.grow(result, newLength);
-      }
-      length = newLength;
-    }
-  }
-
-  /**
-   * @lucene.internal
-   */
   public static final class UTF16Result {
     public char[] result = new char[10];
     public int[] offsets = new int[10];
     public int length;
 
+    /*
+    public String toString() {
+      return new String(result, 0, length);
+    }
+    */
+
     public void setLength(int newLength) {
-      if (result.length < newLength) {
+      if (result.length < newLength)
         result = ArrayUtil.grow(result, newLength);
-      }
       length = newLength;
     }
 
@@ -105,80 +95,89 @@ final public class UnicodeUtil {
       setLength(other.length);
       System.arraycopy(other.result, 0, result, 0, length);
     }
+
+    public void copyText(String other) {
+      final int otherLength = other.length();
+      setLength(otherLength);
+      other.getChars(0, otherLength, result, 0);
+      length = otherLength;
+    }
   }
 
   /** Encode characters from a char[] source, starting at
-   *  offset and stopping when the character 0xffff is seen.
-   *  Returns the number of bytes written to bytesOut. */
-  public static void UTF16toUTF8(final char[] source, final int offset, UTF8Result result) {
-
+   *  offset for length chars.  Returns a hash of the resulting bytes */
+  public static int UTF16toUTF8WithHash(final char[] source, final int offset, final int length, BytesRef result) {
+    int hash = 0;
     int upto = 0;
     int i = offset;
-    byte[] out = result.result;
+    final int end = offset + length;
+    byte[] out = result.bytes;
+    // Pre-allocate for worst case 4-for-1
+    final int maxLen = length * 4;
+    if (out.length < maxLen)
+      out = result.bytes = new byte[ArrayUtil.oversize(maxLen, 1)];
+    result.offset = 0;
 
-    while(true) {
+    while(i < end) {
       
       final int code = (int) source[i++];
 
-      if (upto+4 > out.length) {
-        out = result.result = ArrayUtil.grow(out, upto+4);
-      }
-      if (code < 0x80)
-        out[upto++] = (byte) code;
-      else if (code < 0x800) {
-        out[upto++] = (byte) (0xC0 | (code >> 6));
-        out[upto++] = (byte)(0x80 | (code & 0x3F));
+      if (code < 0x80) {
+        hash = 31*hash + (out[upto++] = (byte) code);
+      } else if (code < 0x800) {
+        hash = 31*hash + (out[upto++] = (byte) (0xC0 | (code >> 6)));
+        hash = 31*hash + (out[upto++] = (byte)(0x80 | (code & 0x3F)));
       } else if (code < 0xD800 || code > 0xDFFF) {
-        if (code == 0xffff)
-          // END
-          break;
-        out[upto++] = (byte)(0xE0 | (code >> 12));
-        out[upto++] = (byte)(0x80 | ((code >> 6) & 0x3F));
-        out[upto++] = (byte)(0x80 | (code & 0x3F));
+        hash = 31*hash + (out[upto++] = (byte)(0xE0 | (code >> 12)));
+        hash = 31*hash + (out[upto++] = (byte)(0x80 | ((code >> 6) & 0x3F)));
+        hash = 31*hash + (out[upto++] = (byte)(0x80 | (code & 0x3F)));
       } else {
         // surrogate pair
         // confirm valid high surrogate
-        if (code < 0xDC00 && source[i] != 0xffff) {
+        if (code < 0xDC00 && i < end) {
           int utf32 = (int) source[i];
           // confirm valid low surrogate and write pair
           if (utf32 >= 0xDC00 && utf32 <= 0xDFFF) { 
             utf32 = ((code - 0xD7C0) << 10) + (utf32 & 0x3FF);
             i++;
-            out[upto++] = (byte)(0xF0 | (utf32 >> 18));
-            out[upto++] = (byte)(0x80 | ((utf32 >> 12) & 0x3F));
-            out[upto++] = (byte)(0x80 | ((utf32 >> 6) & 0x3F));
-            out[upto++] = (byte)(0x80 | (utf32 & 0x3F));
+            hash = 31*hash + (out[upto++] = (byte)(0xF0 | (utf32 >> 18)));
+            hash = 31*hash + (out[upto++] = (byte)(0x80 | ((utf32 >> 12) & 0x3F)));
+            hash = 31*hash + (out[upto++] = (byte)(0x80 | ((utf32 >> 6) & 0x3F)));
+            hash = 31*hash + (out[upto++] = (byte)(0x80 | (utf32 & 0x3F)));
             continue;
           }
         }
         // replace unpaired surrogate or out-of-order low surrogate
         // with substitution character
-        out[upto++] = (byte) 0xEF;
-        out[upto++] = (byte) 0xBF;
-        out[upto++] = (byte) 0xBD;
+        hash = 31*hash + (out[upto++] = (byte) 0xEF);
+        hash = 31*hash + (out[upto++] = (byte) 0xBF);
+        hash = 31*hash + (out[upto++] = (byte) 0xBD);
       }
     }
-    //assert matches(source, offset, i-offset-1, out, upto);
+    //assert matches(source, offset, length, out, upto);
     result.length = upto;
+    return hash;
   }
 
   /** Encode characters from a char[] source, starting at
    *  offset for length chars.  Returns the number of bytes
    *  written to bytesOut. */
-  public static void UTF16toUTF8(final char[] source, final int offset, final int length, UTF8Result result) {
+  public static void UTF16toUTF8(final char[] source, final int offset, final int length, BytesRef result) {
 
     int upto = 0;
     int i = offset;
     final int end = offset + length;
-    byte[] out = result.result;
+    byte[] out = result.bytes;
+    // Pre-allocate for worst case 4-for-1
+    final int maxLen = length * 4;
+    if (out.length < maxLen)
+      out = result.bytes = new byte[ArrayUtil.oversize(maxLen, 1)];
+    result.offset = 0;
 
     while(i < end) {
       
       final int code = (int) source[i++];
 
-      if (upto+4 > out.length) {
-        out = result.result = ArrayUtil.grow(out, upto+4);
-      }
       if (code < 0x80)
         out[upto++] = (byte) code;
       else if (code < 0x800) {
@@ -191,7 +190,7 @@ final public class UnicodeUtil {
       } else {
         // surrogate pair
         // confirm valid high surrogate
-        if (code < 0xDC00 && i < end && source[i] != 0xffff) {
+        if (code < 0xDC00 && i < end) {
           int utf32 = (int) source[i];
           // confirm valid low surrogate and write pair
           if (utf32 >= 0xDC00 && utf32 <= 0xDFFF) { 
@@ -218,18 +217,20 @@ final public class UnicodeUtil {
   /** Encode characters from this String, starting at offset
    *  for length characters.  Returns the number of bytes
    *  written to bytesOut. */
-  public static void UTF16toUTF8(final String s, final int offset, final int length, UTF8Result result) {
+  public static void UTF16toUTF8(final CharSequence s, final int offset, final int length, BytesRef result) {
     final int end = offset + length;
 
-    byte[] out = result.result;
+    byte[] out = result.bytes;
+    result.offset = 0;
+    // Pre-allocate for worst case 4-for-1
+    final int maxLen = length * 4;
+    if (out.length < maxLen)
+      out = result.bytes = new byte[ArrayUtil.oversize(maxLen, 1)];
 
     int upto = 0;
     for(int i=offset;i<end;i++) {
       final int code = (int) s.charAt(i);
 
-      if (upto+4 > out.length) {
-        out = result.result = ArrayUtil.grow(out, upto+4);
-      }
       if (code < 0x80)
         out[upto++] = (byte) code;
       else if (code < 0x800) {
@@ -332,6 +333,71 @@ final public class UnicodeUtil {
     result.length = outUpto;
   }
 
+  /**
+   * Get the next valid UTF-16 String in UTF-16 order.
+   * <p>
+   * If the input String is already valid, it is returned.
+   * Otherwise the next String in code unit order is returned.
+   * </p>
+   * @param s input String (possibly with unpaired surrogates)
+   * @return next valid UTF-16 String in UTF-16 order
+   */
+  public static String nextValidUTF16String(String s) {
+    if (validUTF16String(s))
+        return s;
+    else {
+      UTF16Result chars = new UTF16Result();
+      chars.copyText(s);
+      nextValidUTF16String(chars);
+      return new String(chars.result, 0, chars.length);
+    }
+  }
+  
+  public static void nextValidUTF16String(UTF16Result s) {
+    final int size = s.length;
+    for (int i = 0; i < size; i++) {
+      char ch = s.result[i];
+      if (ch >= UnicodeUtil.UNI_SUR_HIGH_START
+          && ch <= UnicodeUtil.UNI_SUR_HIGH_END) {
+        if (i < size - 1) {
+          i++;
+          char nextCH = s.result[i];
+          if (nextCH >= UnicodeUtil.UNI_SUR_LOW_START
+              && nextCH <= UnicodeUtil.UNI_SUR_LOW_END) {
+            // Valid surrogate pair
+          } else
+          // Unmatched high surrogate
+            if (nextCH < UnicodeUtil.UNI_SUR_LOW_START) { // SMP not enumerated
+              s.setLength(i + 1);
+              s.result[i] = (char) UnicodeUtil.UNI_SUR_LOW_START;             
+              return;
+            } else { // SMP already enumerated
+              if (s.result[i - 1] == UnicodeUtil.UNI_SUR_HIGH_END) {
+                s.result[i - 1] = (char) (UnicodeUtil.UNI_SUR_LOW_END + 1);
+                s.setLength(i);               
+              } else {
+                s.result[i - 1]++;
+                s.result[i] = (char) UnicodeUtil.UNI_SUR_LOW_START;
+                s.setLength(i + 1);
+              }            
+              return;
+            }
+        } else {
+        // Unmatched high surrogate in final position, SMP not yet enumerated
+          s.setLength(i + 2);
+          s.result[i + 1] = (char) UnicodeUtil.UNI_SUR_LOW_START;
+          return;
+        }
+      } else if (ch >= UnicodeUtil.UNI_SUR_LOW_START
+          && ch <= UnicodeUtil.UNI_SUR_LOW_END) {
+      // Unmatched low surrogate, SMP already enumerated
+        s.setLength(i + 1);
+        s.result[i] = (char) (UnicodeUtil.UNI_SUR_LOW_END + 1);
+        return;
+      }
+    }
+  }
+  
   // Only called from assert
   /*
   private static boolean matches(char[] source, int offset, int length, byte[] result, int upto) {
@@ -386,8 +452,8 @@ final public class UnicodeUtil {
       return false;
     }
   }
-
-  public static final boolean validUTF16String(String s) {
+  */
+  public static final boolean validUTF16String(CharSequence s) {
     final int size = s.length();
     for(int i=0;i<size;i++) {
       char ch = s.charAt(i);
@@ -431,5 +497,4 @@ final public class UnicodeUtil {
 
     return true;
   }
-  */
 }
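
A note on the UnicodeUtil rewrite above: the new encoders replace the incremental ArrayUtil.grow calls with one up-front allocation of length * 4 bytes. Four bytes per UTF-16 code unit is a deliberately loose bound (a BMP unit needs at most 3 bytes and a surrogate pair yields 4 bytes for 2 units), trading a little memory for a check-free inner loop. UTF16toUTF8WithHash additionally folds a 31-based rolling hash (the String.hashCode recurrence, applied to the UTF-8 bytes) into each write, so callers get the term's hash without a second pass; the updated TestNumericTokenStream below asserts it equals BytesRef.hashCode(). The sketch that follows exercises the new nextValidUTF16String helper; the expected values are read off the branches in the diff rather than taken from a run, so treat them as assumptions.

    import org.apache.lucene.util.UnicodeUtil;

    public class NextValidUTF16Demo {
      public static void main(String[] args) {
        // Valid input comes back unchanged:
        System.out.println(UnicodeUtil.nextValidUTF16String("abc"));  // abc

        // Unpaired high surrogate in final position: the final-position branch
        // appends U+DC00, the smallest low surrogate, giving the next valid
        // string in UTF-16 code unit order.
        String s1 = UnicodeUtil.nextValidUTF16String("a\uD800");
        System.out.println(s1.equals("a\uD800\uDC00"));               // expected: true

        // Unmatched low surrogate: that branch truncates at the offending
        // position and bumps the code unit to UNI_SUR_LOW_END + 1 (U+E000).
        String s2 = UnicodeUtil.nextValidUTF16String("\uDC00x");
        System.out.println(s2.equals("\uE000"));                      // expected: true
      }
    }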

Propchange: lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/automaton/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Tue Apr  6 19:19:27 2010
@@ -0,0 +1 @@
+moman

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestDemo.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestDemo.java Tue Apr  6 19:19:27 2010
@@ -24,11 +24,13 @@ import org.apache.lucene.analysis.standa
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
@@ -54,7 +56,8 @@ public class TestDemo extends LuceneTest
         TEST_VERSION_CURRENT, analyzer).setMaxFieldLength(25000));
     
     Document doc = new Document();
-    String text = "This is the text to be indexed.";
+    String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
+    String text = "This is the text to be indexed. " + longTerm;
     doc.add(new Field("fieldname", text, Field.Store.YES,
         Field.Index.ANALYZED));
     iwriter.addDocument(doc);
@@ -62,15 +65,17 @@ public class TestDemo extends LuceneTest
     
     // Now search the index:
     IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
+
+    assertEquals(1, isearcher.search(new TermQuery(new Term("fieldname", longTerm)), 1).totalHits);
     // Parse a simple query that searches for "text":
     QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
     Query query = parser.parse("text");
-    ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
+    ScoreDoc[] hits = isearcher.search(query, null, 1).scoreDocs;
     assertEquals(1, hits.length);
     // Iterate through the results:
     for (int i = 0; i < hits.length; i++) {
       Document hitDoc = isearcher.doc(hits[i].doc);
-      assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
+      assertEquals(text, hitDoc.get("fieldname"));
     }
     isearcher.close();
     directory.close();

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java Tue Apr  6 19:19:27 2010
@@ -89,6 +89,9 @@ public class TestSearchForDuplicates ext
       for (int j = 0; j < MAX_DOCS; j++) {
         Document d = new Document();
         d.add(new Field(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
+
+        // NOTE: this ID_FIELD produces no tokens since
+        // SimpleAnalyzer discards numbers
         d.add(new Field(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
         writer.addDocument(d);
       }

Propchange: lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  6 19:19:27 2010
@@ -1,4 +1,5 @@
+/lucene/java/branches/flex_1458/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java:824912-931101
 /lucene/java/branches/lucene_2_4/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java:748824
-/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java:825998,829134,829881,831036,896850,909334
+/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java:829134,829881,831036,896850,909334
 /lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java:880793,896906
-/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java:924483-925561
+/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java:924483-924731,924781,925176-925462

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java Tue Apr  6 19:19:27 2010
@@ -17,8 +17,9 @@ package org.apache.lucene.analysis;
  * limitations under the License.
  */
 
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
 public class TestNumericTokenStream extends BaseTokenStreamTestCase {
@@ -29,27 +30,47 @@ public class TestNumericTokenStream exte
   public void testLongStream() throws Exception {
     final NumericTokenStream stream=new NumericTokenStream().setLongValue(lvalue);
     // use getAttribute to test if attributes really exist, if not an IAE will be throwed
-    final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
+    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
     final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
+    final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
+    final BytesRef bytes = new BytesRef();
+    stream.reset();
+    assertEquals(64, numericAtt.getValueSize());
+    assertEquals(lvalue, numericAtt.getRawValue());
     for (int shift=0; shift<64; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
       assertTrue("New token is available", stream.incrementToken());
-      assertEquals("Term is correctly encoded", NumericUtils.longToPrefixCoded(lvalue, shift), termAtt.term());
-      assertEquals("Type correct", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
+      assertEquals("Shift value wrong", shift, numericAtt.getShift());
+      final int hash = bytesAtt.toBytesRef(bytes);
+      assertEquals("Hash incorrect", bytes.hashCode(), hash);
+      assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), NumericUtils.prefixCodedToLong(bytes));
+      assertEquals("Type incorrect", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
     }
-    assertFalse("No more tokens available", stream.incrementToken());
+    assertFalse("More tokens available", stream.incrementToken());
+    stream.end();
+    stream.close();
   }
 
   public void testIntStream() throws Exception {
     final NumericTokenStream stream=new NumericTokenStream().setIntValue(ivalue);
     // use getAttribute to test if attributes really exist, if not an IAE will be throwed
-    final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
+    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
     final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
+    final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
+    final BytesRef bytes = new BytesRef();
+    stream.reset();
+    assertEquals(32, numericAtt.getValueSize());
+    assertEquals(ivalue, numericAtt.getRawValue());
     for (int shift=0; shift<32; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
       assertTrue("New token is available", stream.incrementToken());
-      assertEquals("Term is correctly encoded", NumericUtils.intToPrefixCoded(ivalue, shift), termAtt.term());
-      assertEquals("Type correct", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
+      assertEquals("Shift value wrong", shift, numericAtt.getShift());
+      final int hash = bytesAtt.toBytesRef(bytes);
+      assertEquals("Hash incorrect", bytes.hashCode(), hash);
+      assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), NumericUtils.prefixCodedToInt(bytes));
+      assertEquals("Type incorrect", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
     }
-    assertFalse("No more tokens available", stream.incrementToken());
+    assertFalse("More tokens available", stream.incrementToken());
+    stream.end();
+    stream.close();
   }
   
   public void testNotInitialized() throws Exception {
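
The rewritten tests above drive the flex attribute API end to end: terms are read as UTF-8 through TermToBytesRefAttribute.toBytesRef(BytesRef), which fills the ref and returns its hash, and NumericTokenStream now exposes the trie encoding through NumericTermAttribute (getShift, getValueSize, getRawValue). Below is a condensed consumer sketch assembled from exactly the calls the test makes; the literal value is arbitrary, and the reset()/end()/close() bracketing follows the updated test:

    import org.apache.lucene.analysis.NumericTokenStream;
    import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.NumericUtils;

    public class NumericStreamConsumer {
      public static void main(String[] args) throws Exception {
        // Arbitrary value; any long is sliced the same way.
        NumericTokenStream stream = new NumericTokenStream().setLongValue(1234567890L);
        TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
        NumericTokenStream.NumericTermAttribute numericAtt =
            stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
        BytesRef bytes = new BytesRef();
        stream.reset();
        while (stream.incrementToken()) {
          int hash = bytesAtt.toBytesRef(bytes);  // fills bytes, returns its hash
          // Each token is the value with the low getShift() bits masked off:
          System.out.println("shift=" + numericAtt.getShift()
              + " value=" + NumericUtils.prefixCodedToLong(bytes)
              + " hash=" + hash);
        }
        stream.end();
        stream.close();
      }
    }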

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java Tue Apr  6 19:19:27 2010
@@ -22,6 +22,7 @@ import org.apache.lucene.util.AttributeI
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.AttributeSource.AttributeFactory;
 
+@Deprecated
 public class TestSimpleAttributeImpls extends LuceneTestCase {
 
   public TestSimpleAttributeImpls(String name) {

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestTermAttributeImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestTermAttributeImpl.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestTermAttributeImpl.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestTermAttributeImpl.java Tue Apr  6 19:19:27 2010
@@ -107,10 +107,10 @@ public class TestTermAttributeImpl exten
     char[] b = {'a', 'l', 'o', 'h', 'a'};
     TermAttributeImpl t = new TermAttributeImpl();
     t.setTermBuffer(b, 0, 5);
-    assertEquals("term=aloha", t.toString());
+    assertEquals("aloha", t.toString());
 
     t.setTermBuffer("hi there");
-    assertEquals("term=hi there", t.toString());
+    assertEquals("hi there", t.toString());
   }
 
   public void testMixedStringArray() throws Exception {

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestDateTools.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestDateTools.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestDateTools.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestDateTools.java Tue Apr  6 19:19:27 2010
@@ -197,4 +197,4 @@ public class TestDateTools extends Local
     }
   }
 
-}
+}
\ No newline at end of file

Propchange: lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestDateTools.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  6 19:19:27 2010
@@ -1,4 +1,5 @@
+/lucene/java/branches/flex_1458/src/test/org/apache/lucene/document/TestDateTools.java:824912-931101
 /lucene/java/branches/lucene_2_4/src/test/org/apache/lucene/document/TestDateTools.java:748824
-/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/document/TestDateTools.java:825998,829134,829881,831036,896850,909334
+/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/document/TestDateTools.java:829134,829881,831036,896850,909334
 /lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/document/TestDateTools.java:880793,896906
-/lucene/java/trunk/src/test/org/apache/lucene/document/TestDateTools.java:924483-925561
+/lucene/java/trunk/src/test/org/apache/lucene/document/TestDateTools.java:924483-924731,924781,925176-925462

Propchange: lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestNumberTools.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  6 19:19:27 2010
@@ -1,4 +1,5 @@
+/lucene/java/branches/flex_1458/src/test/org/apache/lucene/document/TestNumberTools.java:824912-931101
 /lucene/java/branches/lucene_2_4/src/test/org/apache/lucene/document/TestNumberTools.java:748824
-/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/document/TestNumberTools.java:825998,829134,829881,831036,896850,909334
+/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/document/TestNumberTools.java:829134,829881,831036,896850,909334
 /lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/document/TestNumberTools.java:880793,896906
-/lucene/java/trunk/src/test/org/apache/lucene/document/TestNumberTools.java:924483-925561
+/lucene/java/trunk/src/test/org/apache/lucene/document/TestNumberTools.java:924483-924731,924781,925176-925462

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java Tue Apr  6 19:19:27 2010
@@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util._TestUtil;
 
 import org.apache.lucene.search.PhraseQuery;
 
@@ -47,6 +48,7 @@ public class TestAddIndexesNoOptimize ex
     addDocs(writer, 100);
     assertEquals(100, writer.maxDoc());
     writer.close();
+    _TestUtil.checkIndex(dir);
 
     writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE));
     ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
@@ -68,6 +70,7 @@ public class TestAddIndexesNoOptimize ex
     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
     assertEquals(190, writer.maxDoc());
     writer.close();
+    _TestUtil.checkIndex(dir);
 
     // make sure the old index is correct
     verifyNumDocs(aux, 40);
@@ -128,12 +131,13 @@ public class TestAddIndexesNoOptimize ex
 
   public void testWithPendingDeletes() throws IOException {
     // main directory
-    Directory dir = new RAMDirectory();
+    Directory dir = new MockRAMDirectory();
     // auxiliary directory
-    Directory aux = new RAMDirectory();
+    Directory aux = new MockRAMDirectory();
 
     setUpDirs(dir, aux);
     IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
+
     writer.addIndexesNoOptimize(new Directory[] {aux});
 
     // Adds 10 docs, then replaces them with another 10
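
The _TestUtil.checkIndex(dir) calls added above validate index structure after each step. The helper's body is not shown in this diff; as a hedged sketch, a check of this era boils down to running CheckIndex on the directory and failing unless every segment is clean:

    import org.apache.lucene.index.CheckIndex;
    import org.apache.lucene.store.Directory;

    public class IndexChecker {
      // Fail loudly unless CheckIndex reports a fully clean index; this mirrors
      // what a helper like _TestUtil.checkIndex is assumed to do.
      public static void checkIndex(Directory dir) throws Exception {
        CheckIndex checker = new CheckIndex(dir);
        CheckIndex.Status status = checker.checkIndex();
        if (!status.clean) {
          throw new RuntimeException("CheckIndex failed for " + dir);
        }
      }
    }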

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Tue Apr  6 19:19:27 2010
@@ -26,6 +26,7 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.OutputStream;
 import java.util.Arrays;
+import java.util.Random;
 import java.util.Enumeration;
 import java.util.List;
 import java.util.ArrayList;
@@ -39,14 +40,18 @@ import org.apache.lucene.document.Fielda
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.document.FieldSelectorResult;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.BytesRef;
 
 /*
   Verify we can read the pre-2.1 file format, do searches
@@ -134,6 +139,8 @@ public class TestBackwardsCompatibility 
                              "24.nocfs",
                              "29.cfs",
                              "29.nocfs",
+                             "30.cfs",
+                             "30.nocfs",
   };
   
   private void assertCompressedFields29(Directory dir, boolean shouldStillBeCompressed) throws IOException {
@@ -201,14 +208,19 @@ public class TestBackwardsCompatibility 
     }
   }
 
-  public void testOptimizeOldIndex() throws IOException {
+  public void testOptimizeOldIndex() throws Exception {
     int hasTested29 = 0;
+
+    Random rand = newRandom();
     
     for(int i=0;i<oldNames.length;i++) {
       unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
+
       String fullPath = fullDir(oldNames[i]);
       Directory dir = FSDirectory.open(new File(fullPath));
 
+      FlexTestUtil.verifyFlexVsPreFlex(rand, dir);
+
       if (oldNames[i].startsWith("29.")) {
         assertCompressedFields29(dir, true);
         hasTested29++;
@@ -217,6 +229,7 @@ public class TestBackwardsCompatibility 
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
           TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
       w.optimize();
+      FlexTestUtil.verifyFlexVsPreFlex(rand, w);
       w.close();
 
       _TestUtil.checkIndex(dir);
@@ -257,7 +270,7 @@ public class TestBackwardsCompatibility 
     }
   }
 
-  private void testHits(ScoreDoc[] hits, int expectedCount, IndexReader reader) throws IOException {
+  private void doTestHits(ScoreDoc[] hits, int expectedCount, IndexReader reader) throws IOException {
     final int hitCount = hits.length;
     assertEquals("wrong number of hits", expectedCount, hitCount);
     for(int i=0;i<hitCount;i++) {
@@ -267,7 +280,7 @@ public class TestBackwardsCompatibility 
   }
 
   public void searchIndex(String dirName, String oldName) throws IOException {
-    //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+    //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer());
     //Query query = parser.parse("handle:1");
 
     dirName = fullDir(dirName);
@@ -318,7 +331,7 @@ public class TestBackwardsCompatibility 
     Document d = searcher.doc(hits[0].doc);
     assertEquals("didn't get the right document first", "21", d.get("id"));
 
-    testHits(hits, 34, searcher.getIndexReader());
+    doTestHits(hits, 34, searcher.getIndexReader());
 
     if (!oldName.startsWith("19.") &&
         !oldName.startsWith("20.") &&
@@ -374,7 +387,7 @@ public class TestBackwardsCompatibility 
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
     Document d = searcher.doc(hits[0].doc);
     assertEquals("wrong first document", "21", d.get("id"));
-    testHits(hits, 44, searcher.getIndexReader());
+    doTestHits(hits, 44, searcher.getIndexReader());
     searcher.close();
 
     // make sure we can do delete & setNorm against this
@@ -392,7 +405,7 @@ public class TestBackwardsCompatibility 
     assertEquals("wrong number of hits", 43, hits.length);
     d = searcher.doc(hits[0].doc);
     assertEquals("wrong first document", "22", d.get("id"));
-    testHits(hits, 43, searcher.getIndexReader());
+    doTestHits(hits, 43, searcher.getIndexReader());
     searcher.close();
 
     // optimize
@@ -404,7 +417,7 @@ public class TestBackwardsCompatibility 
     hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
     assertEquals("wrong number of hits", 43, hits.length);
     d = searcher.doc(hits[0].doc);
-    testHits(hits, 43, searcher.getIndexReader());
+    doTestHits(hits, 43, searcher.getIndexReader());
     assertEquals("wrong first document", "22", d.get("id"));
     searcher.close();
 
@@ -442,7 +455,7 @@ public class TestBackwardsCompatibility 
     assertEquals("wrong number of hits", 33, hits.length);
     d = searcher.doc(hits[0].doc);
     assertEquals("wrong first document", "22", d.get("id"));
-    testHits(hits, 33, searcher.getIndexReader());
+    doTestHits(hits, 33, searcher.getIndexReader());
     searcher.close();
 
     // optimize
@@ -455,7 +468,7 @@ public class TestBackwardsCompatibility 
     assertEquals("wrong number of hits", 33, hits.length);
     d = searcher.doc(hits[0].doc);
     assertEquals("wrong first document", "22", d.get("id"));
-    testHits(hits, 33, searcher.getIndexReader());
+    doTestHits(hits, 33, searcher.getIndexReader());
     searcher.close();
 
     dir.close();
@@ -593,6 +606,9 @@ public class TestBackwardsCompatibility 
       doc.add(new Field("compressedSize", Integer.toString(BINARY_COMPRESSED_LENGTH), Field.Store.YES, Field.Index.NOT_ANALYZED));
     }
     */
+    // add numeric fields, to test if flex preserves encoding
+    doc.add(new NumericField("trieInt", 4).setIntValue(id));
+    doc.add(new NumericField("trieLong", 4).setLongValue(id));
     writer.addDocument(doc);
   }
 
@@ -641,4 +657,105 @@ public class TestBackwardsCompatibility 
   /* This was used in 2.9 to generate an index with compressed field:
   static final int BINARY_COMPRESSED_LENGTH = CompressionTools.compress(BINARY_TO_COMPRESS).length;
   */
+
+  private int countDocs(DocsEnum docs) throws IOException {
+    int count = 0;
+    while((docs.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+      count ++;
+    }
+    return count;
+  }
+
+  // flex: test basics of TermsEnum api on non-flex index
+  public void testNextIntoWrongField() throws Exception {
+    for(int i=0;i<oldNames.length;i++) {
+      unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
+      String fullPath = fullDir(oldNames[i]);
+      Directory dir = FSDirectory.open(new File(fullPath));
+      IndexReader r = IndexReader.open(dir);
+      TermsEnum terms = MultiFields.getFields(r).terms("content").iterator();
+      BytesRef t = terms.next();
+      assertNotNull(t);
+
+      // content field only has term aaa:
+      assertEquals("aaa", t.utf8ToString());
+      assertNull(terms.next());
+
+      BytesRef aaaTerm = new BytesRef("aaa");
+
+      // should be found exactly
+      assertEquals(TermsEnum.SeekStatus.FOUND,
+                   terms.seek(aaaTerm));
+      assertEquals(35, countDocs(terms.docs(null, null)));
+      assertNull(terms.next());
+
+      // should hit end of field
+      assertEquals(TermsEnum.SeekStatus.END,
+                   terms.seek(new BytesRef("bbb")));
+      assertNull(terms.next());
+
+      // should seek to aaa
+      assertEquals(TermsEnum.SeekStatus.NOT_FOUND,
+                   terms.seek(new BytesRef("a")));
+      assertTrue(terms.term().bytesEquals(aaaTerm));
+      assertEquals(35, countDocs(terms.docs(null, null)));
+      assertNull(terms.next());
+
+      assertEquals(TermsEnum.SeekStatus.FOUND,
+                   terms.seek(aaaTerm));
+      assertEquals(35, countDocs(terms.docs(null, null)));
+      assertNull(terms.next());
+
+      r.close();
+      dir.close();
+      rmDir(oldNames[i]);
+    }
+  }
+  
+  public void testNumericFields() throws Exception {
+    for(int i=0;i<oldNames.length;i++) {
+      // only test indexes >= 3.0
+      if (oldNames[i].compareTo("30.") < 0) continue;
+      
+      unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
+      String fullPath = fullDir(oldNames[i]);
+      Directory dir = FSDirectory.open(new File(fullPath));
+      IndexSearcher searcher = new IndexSearcher(dir, true);
+      
+      for (int id=10; id<15; id++) {
+        ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
+        assertEquals("wrong number of hits", 1, hits.length);
+        Document d = searcher.doc(hits[0].doc);
+        assertEquals(String.valueOf(id), d.get("id"));
+        
+        hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs;
+        assertEquals("wrong number of hits", 1, hits.length);
+        d = searcher.doc(hits[0].doc);
+        assertEquals(String.valueOf(id), d.get("id"));
+      }
+      
+      // check that also lower-precision fields are ok
+      ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.MIN_VALUE, Integer.MAX_VALUE, false, false), 100).scoreDocs;
+      assertEquals("wrong number of hits", 34, hits.length);
+      
+      hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.MIN_VALUE, Long.MAX_VALUE, false, false), 100).scoreDocs;
+      assertEquals("wrong number of hits", 34, hits.length);
+      
+      // check decoding into field cache
+      int[] fci = FieldCache.DEFAULT.getInts(searcher.getIndexReader(), "trieInt");
+      for (int val : fci) {
+        assertTrue("value in id bounds", val >= 0 && val < 35);
+      }
+      
+      long[] fcl = FieldCache.DEFAULT.getLongs(searcher.getIndexReader(), "trieLong");
+      for (long val : fcl) {
+        assertTrue("value in id bounds", val >= 0L && val < 35L);
+      }
+      
+      searcher.close();
+      dir.close();
+      rmDir(oldNames[i]);
+    }
+  }
+
 }
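
The two tests added above lean on the flex enumeration API throughout: MultiFields.getFields(reader).terms(field).iterator() yields a per-field TermsEnum, seek(BytesRef) reports FOUND, NOT_FOUND, or END, and docs(null, null) returns a DocsEnum walked with nextDoc() until NO_MORE_DOCS. A condensed sketch of that pattern, combining the test's seek and countDocs steps into one illustrative helper:

    import java.io.IOException;

    import org.apache.lucene.index.DocsEnum;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiFields;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    public class FlexTermWalk {
      // Count the documents containing an exact term, the same way
      // testNextIntoWrongField pairs seek() with countDocs() above.
      public static int docsFor(IndexReader r, String field, String text) throws IOException {
        TermsEnum terms = MultiFields.getFields(r).terms(field).iterator();
        if (terms.seek(new BytesRef(text)) != TermsEnum.SeekStatus.FOUND) {
          return 0;                              // exact term not present
        }
        DocsEnum docs = terms.docs(null, null);  // no deleted-docs filter, no reuse
        int count = 0;
        while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
          count++;
        }
        return count;
      }
    }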

Propchange: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  6 19:19:27 2010
@@ -1,4 +1,5 @@
+/lucene/java/branches/flex_1458/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:824912-931101
 /lucene/java/branches/lucene_2_4/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:748824
-/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:825998,829134,829881,831036,896850,909334
-/lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:880793,896906
-/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:924483-925561
+/lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:829134,829881,831036,896850,909334
+/lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:880793,896906,928290
+/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java:924483-924731,924781,925176-925462

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDoc.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDoc.java Tue Apr  6 19:19:27 2010
@@ -36,6 +36,7 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.index.codecs.CodecProvider;
 
 
 /** JUnit adaptation of an older test case DocTest. */
@@ -185,20 +186,24 @@ public class TestDoc extends LuceneTestC
       SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
       SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
 
-      SegmentMerger merger = new SegmentMerger(si1.dir, merged);
+      SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL, merged, null, CodecProvider.getDefault());
 
       merger.add(r1);
       merger.add(r2);
       merger.merge();
       merger.closeReaders();
       
+      final SegmentInfo info = new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir,
+                                               useCompoundFile, true, -1, null, false, merger.hasProx(),
+                                               merger.getCodec());
+      
       if (useCompoundFile) {
-        List<String> filesToDelete = merger.createCompoundFile(merged + ".cfs");
+        List<String> filesToDelete = merger.createCompoundFile(merged + ".cfs", info);
         for (final String fileToDelete : filesToDelete) 
           si1.dir.deleteFile(fileToDelete);
       }
 
-      return new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir, useCompoundFile, true);
+      return info;
    }
 
 

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java Tue Apr  6 19:19:27 2010
@@ -21,7 +21,6 @@ package org.apache.lucene.index;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -42,6 +41,7 @@ import org.apache.lucene.document.FieldS
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.document.SetBasedFieldSelector;
 import org.apache.lucene.index.IndexReader.FieldOption;
+import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
@@ -881,15 +881,18 @@ public class TestIndexReader extends Luc
         d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
         d.add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(d);
+        if (0==i%10)
+          writer.commit();
       }
       writer.close();
 
-      long diskUsage = startDir.sizeInBytes();
-      long diskFree = diskUsage+100;      
+      long diskUsage = ((MockRAMDirectory) startDir).getRecomputedActualSizeInBytes();
+      long diskFree = diskUsage+100;
 
       IOException err = null;
 
       boolean done = false;
+      boolean gotExc = false;
 
       // Iterate w/ ever increasing free disk space:
       while(!done) {
@@ -946,7 +949,7 @@ public class TestIndexReader extends Luc
               int docId = 12;
               for(int i=0;i<13;i++) {
                 reader.deleteDocument(docId);
-                reader.setNorm(docId, "contents", (float) 2.0);
+                reader.setNorm(docId, "content", (float) 2.0);
                 docId += 12;
               }
             }
@@ -961,6 +964,7 @@ public class TestIndexReader extends Luc
               e.printStackTrace(System.out);
             }
             err = e;
+            gotExc = true;
             if (1 == x) {
               e.printStackTrace();
               fail(testName + " hit IOException after disk space was freed up");
@@ -973,29 +977,7 @@ public class TestIndexReader extends Luc
           // new IndexFileDeleter, have it delete
           // unreferenced files, then verify that in fact
           // no files were deleted:
-          String[] startFiles = dir.listAll();
-          SegmentInfos infos = new SegmentInfos();
-          infos.read(dir);
-          new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
-          String[] endFiles = dir.listAll();
-
-          Arrays.sort(startFiles);
-          Arrays.sort(endFiles);
-
-          //for(int i=0;i<startFiles.length;i++) {
-          //  System.out.println("  startFiles: " + i + ": " + startFiles[i]);
-          //}
-
-          if (!Arrays.equals(startFiles, endFiles)) {
-            String successStr;
-            if (success) {
-              successStr = "success";
-            } else {
-              successStr = "IOException";
-              err.printStackTrace();
-            }
-            fail("reader.close() failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes): before delete:\n    " + arrayToString(startFiles) + "\n  after delete:\n    " + arrayToString(endFiles));
-          }
+          TestIndexWriter.assertNoUnreferencedFiles(dir, "reader.close() failed to delete unreferenced files");
 
           // Finally, verify index is not corrupt, and, if
           // we succeeded, we see all docs changed, and if
@@ -1050,6 +1032,8 @@ public class TestIndexReader extends Luc
           newReader.close();
 
           if (result2 == END_COUNT) {
+            if (!gotExc)
+              fail("never hit disk full");
             break;
           }
         }
@@ -1135,17 +1119,6 @@ public class TestIndexReader extends Luc
       dir.close();
     }
 
-    private String arrayToString(String[] l) {
-      String s = "";
-      for(int i=0;i<l.length;i++) {
-        if (i > 0) {
-          s += "\n    ";
-        }
-        s += l[i];
-      }
-      return s;
-    }
-
     public void testOpenReaderAfterDelete() throws IOException {
       File dirFile = new File(TEMP_DIR, "deletetest");
       Directory dir = FSDirectory.open(dirFile);
@@ -1410,7 +1383,7 @@ public class TestIndexReader extends Luc
       writer.close();
 
       SegmentInfos sis = new SegmentInfos();
-      sis.read(d);
+      sis.read(d, CodecProvider.getDefault());
       IndexReader r = IndexReader.open(d, false);
       IndexCommit c = r.getIndexCommit();
 
@@ -1597,6 +1570,7 @@ public class TestIndexReader extends Luc
   // LUCENE-1579: Ensure that on a cloned reader, segments
   // reuse the doc values arrays in FieldCache
   public void testFieldCacheReuseAfterClone() throws Exception {
+    //Codec.DEBUG = true;
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     Document doc = new Document();
@@ -1750,7 +1724,6 @@ public class TestIndexReader extends Luc
     } catch (IllegalStateException ise) {
       // expected
     }
-    assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded());
 
     assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
     writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
@@ -1763,10 +1736,13 @@ public class TestIndexReader extends Luc
     IndexReader[] subReaders = r2.getSequentialSubReaders();
     assertEquals(2, subReaders.length);
     for(int i=0;i<2;i++) {
-      assertFalse(((SegmentReader) subReaders[i]).termsIndexLoaded());
+      try {
+        subReaders[i].docFreq(new Term("field", "f"));
+        fail("did not hit expected exception");
+      } catch (IllegalStateException ise) {
+        // expected
+      }
     }
-    r2.close();
-    dir.close();
   }
 
   // LUCENE-2046
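
The assertion changes in this file replace the removed SegmentReader.termsIndexLoaded() probe with observable behavior: when a reader is opened with a termInfosIndexDivisor of -1 the terms index is never loaded, and any operation that needs it, such as docFreq, now throws IllegalStateException up front. A hedged sketch of that contract, assuming the IndexReader.open overload that takes the divisor (the hunks above do not show how the test opens its readers):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.Directory;

    public class NoTermsIndexDemo {
      public static void demo(Directory dir) throws Exception {
        // Divisor -1: skip loading the terms index entirely.
        IndexReader r = IndexReader.open(dir, null, true, -1);
        try {
          r.docFreq(new Term("field", "f"));  // requires the terms index
          throw new AssertionError("expected IllegalStateException");
        } catch (IllegalStateException expected) {
          // flex fails fast here instead of exposing termsIndexLoaded()
        } finally {
          r.close();
        }
      }
    }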

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java Tue Apr  6 19:19:27 2010
@@ -863,6 +863,8 @@ public class TestIndexReaderReopen exten
     assertReaderClosed(reader, true, true);
     assertReaderClosed(firstReader, true, true);
 
+    FlexTestUtil.verifyFlexVsPreFlex(rnd, dir);
+
     dir.close();
   }
   

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Tue Apr  6 19:19:27 2010
@@ -23,11 +23,13 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.List;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Set;
+import java.util.HashSet;
 import java.util.HashMap;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -49,6 +51,7 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.document.Field.Index;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field.TermVector;
@@ -72,6 +75,7 @@ import org.apache.lucene.store.SingleIns
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.ThreadInterruptedException;
+import org.apache.lucene.util.BytesRef;
 
 public class TestIndexWriter extends LuceneTestCase {
     public TestIndexWriter(String name) {
@@ -525,7 +529,7 @@ public class TestIndexWriter extends Luc
       String[] startFiles = dir.listAll();
       SegmentInfos infos = new SegmentInfos();
       infos.read(dir);
-      new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
+      new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null, CodecProvider.getDefault());
       String[] endFiles = dir.listAll();
 
       Arrays.sort(startFiles);
@@ -544,13 +548,12 @@ public class TestIndexWriter extends Luc
       IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
         TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
 
-      char[] chars = new char[DocumentsWriter.CHAR_BLOCK_SIZE-1];
+      char[] chars = new char[DocumentsWriter.MAX_TERM_LENGTH_UTF8];
       Arrays.fill(chars, 'x');
       Document doc = new Document();
       final String bigTerm = new String(chars);
 
-      // Max length term is 16383, so this contents produces
-      // a too-long term:
+      // This produces a too-long term:
       String contents = "abc xyz x" + bigTerm + " another term";
       doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
       writer.addDocument(doc);
@@ -3306,7 +3309,7 @@ public class TestIndexWriter extends Luc
   // LUCENE-510
   public void testAllUnicodeChars() throws Throwable {
 
-    UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
+    BytesRef utf8 = new BytesRef(10);
     UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
     char[] chars = new char[2];
     for(int ch=0;ch<0x0010FFFF;ch++) {
@@ -3326,16 +3329,16 @@ public class TestIndexWriter extends Luc
       UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
       
       String s1 = new String(chars, 0, len);
-      String s2 = new String(utf8.result, 0, utf8.length, "UTF-8");
+      String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
       assertEquals("codepoint " + ch, s1, s2);
 
-      UnicodeUtil.UTF8toUTF16(utf8.result, 0, utf8.length, utf16);
+      UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
       assertEquals("codepoint " + ch, s1, new String(utf16.result, 0, utf16.length));
 
       byte[] b = s1.getBytes("UTF-8");
       assertEquals(utf8.length, b.length);
       for(int j=0;j<utf8.length;j++)
-        assertEquals(utf8.result[j], b[j]);
+        assertEquals(utf8.bytes[j], b[j]);
     }
   }
 
@@ -3400,7 +3403,7 @@ public class TestIndexWriter extends Luc
     char[] buffer = new char[20];
     char[] expected = new char[20];
 
-    UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
+    BytesRef utf8 = new BytesRef(20);
     UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
 
     for(int iter=0;iter<100000;iter++) {
@@ -3411,10 +3414,10 @@ public class TestIndexWriter extends Luc
         byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
         assertEquals(b.length, utf8.length);
         for(int i=0;i<b.length;i++)
-          assertEquals(b[i], utf8.result[i]);
+          assertEquals(b[i], utf8.bytes[i]);
       }
 
-      UnicodeUtil.UTF8toUTF16(utf8.result, 0, utf8.length, utf16);
+      UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
       assertEquals(utf16.length, 20);
       for(int i=0;i<20;i++)
         assertEquals(expected[i], utf16.result[i]);
@@ -3427,7 +3430,7 @@ public class TestIndexWriter extends Luc
     char[] buffer = new char[20];
     char[] expected = new char[20];
 
-    UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
+    BytesRef utf8 = new BytesRef(new byte[20]);
     UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
     UnicodeUtil.UTF16Result utf16a = new UnicodeUtil.UTF16Result();
 
@@ -3450,7 +3453,7 @@ public class TestIndexWriter extends Luc
         byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
         assertEquals(b.length, utf8.length);
         for(int i=0;i<b.length;i++)
-          assertEquals(b[i], utf8.result[i]);
+          assertEquals(b[i], utf8.bytes[i]);
       }
 
       int bytePrefix = 20;
@@ -3458,18 +3461,18 @@ public class TestIndexWriter extends Luc
         bytePrefix = 0;
       else
         for(int i=0;i<20;i++)
-          if (last[i] != utf8.result[i]) {
+          if (last[i] != utf8.bytes[i]) {
             bytePrefix = i;
             break;
           }
-      System.arraycopy(utf8.result, 0, last, 0, utf8.length);
+      System.arraycopy(utf8.bytes, 0, last, 0, utf8.length);
 
-      UnicodeUtil.UTF8toUTF16(utf8.result, bytePrefix, utf8.length-bytePrefix, utf16);
+      UnicodeUtil.UTF8toUTF16(utf8.bytes, bytePrefix, utf8.length-bytePrefix, utf16);
       assertEquals(20, utf16.length);
       for(int i=0;i<20;i++)
         assertEquals(expected[i], utf16.result[i]);
 
-      UnicodeUtil.UTF8toUTF16(utf8.result, 0, utf8.length, utf16a);
+      UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16a);
       assertEquals(20, utf16a.length);
       for(int i=0;i<20;i++)
         assertEquals(expected[i], utf16a.result[i]);
@@ -4335,11 +4338,6 @@ public class TestIndexWriter extends Luc
       new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))).close();
 
       assertTrue(dir.fileExists("myrandomfile"));
-
-      // Make sure this does not copy myrandomfile:
-      Directory dir2 = new RAMDirectory(dir);
-      assertTrue(!dir2.fileExists("myrandomfile"));
-
     } finally {
       dir.close();
       _TestUtil.rmDir(indexDir);
@@ -4609,8 +4607,10 @@ public class TestIndexWriter extends Luc
     doc = new Document();
     doc.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
     w.addDocument(doc);
+    IndexReader r = w.getReader();
+    assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
+    r.close();
     w.close();
-
     _TestUtil.checkIndex(d);
     d.close();
   }
@@ -4630,8 +4630,8 @@ public class TestIndexWriter extends Luc
     _TestUtil.checkIndex(dir);
     dir.close();
   }
-
-  // LUCENE-2095: make sure with multiple threads commit
+
+  // LUCENE-2095: make sure that, with multiple threads, commit
   // doesn't return until all changes are in fact in the
   // index
   public void testCommitThreadSafety() throws Throwable {
@@ -4686,6 +4686,172 @@ public class TestIndexWriter extends Luc
     assertFalse(failed.get());
   }
 
+  // both start & end are inclusive
+  private final int getInt(Random r, int start, int end) {
+    return start + r.nextInt(1+end-start);
+  }
+
+  private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
+    TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
+
+    char[] last = new char[2];
+    int lastLength = 0;
+
+    Set<String> seenTerms = new HashSet<String>();
+
+    UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
+    while(true) {
+      final BytesRef term = terms.next();
+      if (term == null) {
+        break;
+      }
+      UnicodeUtil.UTF8toUTF16(term.bytes, term.offset, term.length, utf16);
+      assertTrue(utf16.length <= 2);
+
+      // Make sure last term comes before current one, in
+      // UTF16 sort order
+      int i = 0;
+      for(i=0;i<lastLength && i<utf16.length;i++) {
+        assertTrue("UTF16 code unit " + termDesc(new String(utf16.result, 0, utf16.length)) + " incorrectly sorted after code unit " + termDesc(new String(last, 0, lastLength)), last[i] <= utf16.result[i]);
+        if (last[i] < utf16.result[i]) {
+          break;
+        }
+      }
+      // Terms should not have been identical
+      assertTrue(lastLength != utf16.length || i < lastLength);
+
+      final String s = new String(utf16.result, 0, utf16.length);
+      assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
+      seenTerms.add(s);
+
+      System.arraycopy(utf16.result, 0, last, 0, utf16.length);
+      lastLength = utf16.length;
+    }
+
+    if (isTop) {
+      assertTrue(allTerms.equals(seenTerms));
+    }
+
+    // Test seeking:
+    Iterator<String> it = seenTerms.iterator();
+    while(it.hasNext()) {
+      BytesRef tr = new BytesRef(it.next());
+      assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
+                   TermsEnum.SeekStatus.FOUND,
+                   terms.seek(tr));
+    }
+  }
+
+  private final String asUnicodeChar(char c) {
+    return "U+" + Integer.toHexString(c);
+  }
+
+  private final String termDesc(String s) {
+    final String s0;
+    assertTrue(s.length() <= 2);
+    if (s.length() == 1) {
+      s0 = asUnicodeChar(s.charAt(0));
+    } else {
+      s0 = asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
+    }
+    return s0;
+  }
+
+  // Make sure terms, including ones with surrogate pairs,
+  // sort in UTF16 sort order by default
+  public void testTermUTF16SortOrder() throws Throwable {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+    Document d = new Document();
+    // Single segment
+    Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    d.add(f);
+    char[] chars = new char[2];
+    Random rnd = newRandom();
+    final Set<String> allTerms = new HashSet<String>();
+
+    for(int i=0;i<200;i++) {
+
+      final String s;
+      if (rnd.nextBoolean()) {
+        // Single char
+        if (rnd.nextBoolean()) {
+          // Above surrogates
+          chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
+        } else {
+          // Below surrogates
+          chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
+        }
+        s = new String(chars, 0, 1);
+      } else {
+        // Surrogate pair
+        chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
+        assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
+        chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
+        s = new String(chars, 0, 2);
+      }
+      allTerms.add(s);
+      f.setValue(s);
+
+      //System.out.println("add " + termDesc(s));
+      writer.addDocument(d);
+
+      if ((1+i) % 42 == 0) {
+        writer.commit();
+      }
+    }
+    
+    IndexReader r = writer.getReader();
+
+    // Test each sub-segment
+    final IndexReader[] subs = r.getSequentialSubReaders();
+    assertEquals(5, subs.length);
+    for(int i=0;i<subs.length;i++) {
+      checkTermsOrder(subs[i], allTerms, false);
+    }
+    checkTermsOrder(r, allTerms, true);
+
+    // Test multi segment
+    r.close();
+
+    writer.optimize();
+
+    // Test optimized single segment
+    r = writer.getReader();
+    checkTermsOrder(r, allTerms, true);
+    r.close();
+
+    writer.close();
+    dir.close();
+  }
+
+  public void testIndexDivisor() throws Exception {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+    StringBuilder s = new StringBuilder();
+    // must be > 256 (default term index interval 128 * divisor 2 below)
+    for(int i=0;i<300;i++) {
+      s.append(' ').append(i);
+    }
+    Document d = new Document();
+    Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
+    d.add(f);
+    w.addDocument(d);
+    IndexReader r = w.getReader(2).getSequentialSubReaders()[0];
+    TermsEnum t = r.fields().terms("field").iterator();
+    int count = 0;
+    while(t.next() != null) {
+      final DocsEnum docs = t.docs(null, null);
+      assertEquals(0, docs.nextDoc());
+      assertEquals(DocsEnum.NO_MORE_DOCS, docs.nextDoc());
+      count++;
+    }
+    assertEquals(300, count);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
   public void testDeleteUnusedFiles() throws Exception {
 
     for(int iter=0;iter<2;iter++) {
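
Background on testTermUTF16SortOrder above: flex stores terms as UTF-8
bytes, yet the test asserts UTF-16 code-unit order, and the two orders
disagree exactly when surrogate pairs are involved. A minimal,
Lucene-independent sketch of the disagreement (Utf16OrderDemo is an
illustrative name, not part of the test):

    // UTF-16 code-unit order vs. code-point order for a surrogate pair.
    public class Utf16OrderDemo {
      public static void main(String[] args) {
        String bmp  = "\uFFFF";            // one code unit, 0xFFFF
        String supp = "\uD800\uDC00";      // surrogate pair for U+10000

        // String.compareTo compares UTF-16 code units: 0xFFFF > 0xD800,
        // so the BMP character sorts AFTER the supplementary one...
        System.out.println(bmp.compareTo(supp) > 0);                  // true

        // ...while by code point (equivalently, by UTF-8 byte order)
        // U+FFFF sorts BEFORE U+10000.
        System.out.println(bmp.codePointAt(0) < supp.codePointAt(0)); // true
      }
    }

Since raw UTF-8 byte comparison yields code-point order, preserving the
legacy UTF-16 order for byte-encoded terms takes extra work at compare
time, which is the default behavior this test pins down.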

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java Tue Apr  6 19:19:27 2010
@@ -30,6 +30,7 @@ import org.apache.lucene.analysis.Whites
 import org.apache.lucene.index.DocumentsWriter.IndexingChain;
 import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.store.Directory;
@@ -81,6 +82,7 @@ public class TestIndexWriterConfig exten
     assertEquals(IndexWriterConfig.DEFAULT_READER_POOLING, conf.getReaderPooling());
     assertTrue(DocumentsWriter.defaultIndexingChain == conf.getIndexingChain());
     assertNull(conf.getMergedSegmentWarmer());
+    assertEquals(IndexWriterConfig.DEFAULT_CODEC_PROVIDER, CodecProvider.getDefault());
     assertEquals(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, conf.getMaxThreadStates());
     assertEquals(LogByteSizeMergePolicy.class, conf.getMergePolicy().getClass());
     
@@ -101,6 +103,7 @@ public class TestIndexWriterConfig exten
     getters.add("getMaxBufferedDocs");
     getters.add("getIndexingChain");
     getters.add("getMergedSegmentWarmer");
+    getters.add("getCodecProvider");
     getters.add("getMergePolicy");
     getters.add("getMaxThreadStates");
     getters.add("getReaderPooling");

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Tue Apr  6 19:19:27 2010
@@ -18,7 +18,6 @@ package org.apache.lucene.index;
  */
 
 import java.io.IOException;
-import java.util.Arrays;
 
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
@@ -770,30 +769,22 @@ public class TestIndexWriterDelete exten
       }
     }
 
-    String[] startFiles = dir.listAll();
-    SegmentInfos infos = new SegmentInfos();
-    infos.read(dir);
-    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
-    String[] endFiles = dir.listAll();
-
-    if (!Arrays.equals(startFiles, endFiles)) {
-      fail("docswriter abort() failed to delete unreferenced files:\n  before delete:\n    "
-           + arrayToString(startFiles) + "\n  after delete:\n    "
-           + arrayToString(endFiles));
-    }
-
+    TestIndexWriter.assertNoUnreferencedFiles(dir, "docsWriter.abort() failed to delete unreferenced files");
     modifier.close();
-
   }
 
-  private String arrayToString(String[] l) {
-    String s = "";
-    for (int i = 0; i < l.length; i++) {
-      if (i > 0) {
-        s += "\n    ";
-      }
-      s += l[i];
+  public void testDeleteNullQuery() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+
+    for (int i = 0; i < 5; i++) {
+      addDoc(modifier, i, 2*i);
     }
-    return s;
+
+    modifier.deleteDocuments(new TermQuery(new Term("nada", "nada")));
+    modifier.commit();
+    assertEquals(5, modifier.numDocs());
+    modifier.close();
+    dir.close();
   }
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java Tue Apr  6 19:19:27 2010
@@ -84,7 +84,6 @@ public class TestIndexWriterReader exten
 
     // get a reader
     IndexReader r1 = writer.getReader();
-    assertTrue(r1.isCurrent());
 
     String id10 = r1.document(10).getField("id").stringValue();
     
@@ -92,20 +91,15 @@ public class TestIndexWriterReader exten
     newDoc.removeField("id");
     newDoc.add(new Field("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
     writer.updateDocument(new Term("id", id10), newDoc);
-    assertFalse(r1.isCurrent());
 
     IndexReader r2 = writer.getReader();
-    assertTrue(r2.isCurrent());
     assertEquals(0, count(new Term("id", id10), r2));
     assertEquals(1, count(new Term("id", Integer.toString(8000)), r2));
     
     r1.close();
     writer.close();
-    assertTrue(r2.isCurrent());
     
     IndexReader r3 = IndexReader.open(dir1, true);
-    assertTrue(r3.isCurrent());
-    assertTrue(r2.isCurrent());
     assertEquals(0, count(new Term("id", id10), r3));
     assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
 
@@ -149,18 +143,9 @@ public class TestIndexWriterReader exten
     createIndexNoClose(!optimize, "index2", writer2);
     writer2.close();
 
-    IndexReader r0 = writer.getReader();
-    assertTrue(r0.isCurrent());
     writer.addIndexesNoOptimize(new Directory[] { dir2 });
-    assertFalse(r0.isCurrent());
-    r0.close();
 
     IndexReader r1 = writer.getReader();
-    assertTrue(r1.isCurrent());
-
-    writer.commit();
-    assertTrue(r1.isCurrent());
-
     assertEquals(200, r1.maxDoc());
 
     int index2df = r1.docFreq(new Term("indexname", "index2"));
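
The assertions removed above were about isCurrent(); the core
near-real-time pattern stays: writer.getReader() returns a searchable
view that already includes pending, uncommitted changes, and that
reader is closed independently of the writer. A compact sketch of the
shape (NrtSearchSketch and docFreqNow are illustrative names, not part
of this test):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;

    public final class NrtSearchSketch {
      // Hypothetical helper: doc freq as of "now", including buffered docs.
      static int docFreqNow(IndexWriter writer, Term t) throws IOException {
        IndexReader r = writer.getReader();  // NRT view of the index
        try {
          return r.docFreq(t);
        } finally {
          r.close();  // NRT readers are closed separately from the writer
        }
      }
    }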

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java Tue Apr  6 19:19:27 2010
@@ -48,7 +48,7 @@ public class TestLazyProxSkipping extend
       @Override
       public IndexInput openInput(String name) throws IOException {
         IndexInput ii = super.openInput(name);
-        if (name.endsWith(".prx")) {
+        if (name.endsWith(".prx") || name.endsWith(".pos")) {
           // we decorate the proxStream with a wrapper class that lets us count the number of calls to seek()
           ii = new SeeksCountingStream(ii);
         }
@@ -107,7 +107,7 @@ public class TestLazyProxSkipping extend
         
         // check if the number of calls of seek() does not exceed the number of hits
         assertTrue(this.seeksCounter > 0);
-        assertTrue(this.seeksCounter <= numHits + 1);
+        assertTrue("seeksCounter=" + this.seeksCounter + " numHits=" + numHits, this.seeksCounter <= numHits + 1);
     }
     
     public void testLazySkipping() throws IOException {
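
The SeeksCountingStream used above is a plain delegation decorator:
forward every call, but count the interesting one. A minimal sketch of
the shape against a hypothetical Input interface (this deliberately
does not mirror Lucene's actual IndexInput abstract-method set):

    // Decorator that counts seek() calls while delegating everything else.
    interface Input {
      void seek(long pos);
      byte readByte();
    }

    final class SeekCountingInput implements Input {
      private final Input delegate;
      int seekCount;  // inspected by the test afterwards

      SeekCountingInput(Input delegate) { this.delegate = delegate; }

      public void seek(long pos) {
        seekCount++;               // count, then forward
        delegate.seek(pos);
      }

      public byte readByte() { return delegate.readByte(); }
    }

Asserting an upper bound on seekCount relative to the number of hits is
what makes lazy proximity skipping observable from a test.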

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java Tue Apr  6 19:19:27 2010
@@ -29,8 +29,9 @@ import org.apache.lucene.document.Docume
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Index;
 import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 
 /**
@@ -42,8 +43,18 @@ import org.apache.lucene.util.LuceneTest
  * 
  */
 public class TestMultiLevelSkipList extends LuceneTestCase {
+  
+  class CountingRAMDirectory extends MockRAMDirectory {
+    public IndexInput openInput(String fileName) throws IOException {
+      IndexInput in = super.openInput(fileName);
+      if (fileName.endsWith(".frq"))
+        in = new CountingStream(in);
+      return in;
+    }
+  }
+
   public void testSimpleSkip() throws IOException {
-    RAMDirectory dir = new RAMDirectory();
+    Directory dir = new CountingRAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()));
     Term term = new Term("test", "a");
     for (int i = 0; i < 5000; i++) {
@@ -56,9 +67,8 @@ public class TestMultiLevelSkipList exte
     writer.close();
 
     IndexReader reader = SegmentReader.getOnlySegmentReader(dir);
-    SegmentTermPositions tp = (SegmentTermPositions) reader.termPositions();
-    tp.freqStream = new CountingStream(tp.freqStream);
-
+    TermPositions tp = reader.termPositions();
+    
     for (int i = 0; i < 2; i++) {
       counter = 0;
       tp.seek(term);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestNorms.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestNorms.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestNorms.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestNorms.java Tue Apr  6 19:19:27 2010
@@ -186,6 +186,7 @@ public class TestNorms extends LuceneTes
         assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
       }
     }
+    ir.close();
   }
 
   private void addDocs(Directory dir, int ndocs, boolean compound) throws IOException {

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitTf.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitTf.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitTf.java Tue Apr  6 19:19:27 2010
@@ -19,6 +19,7 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Random;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
@@ -26,13 +27,7 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.*;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
@@ -85,20 +80,26 @@ public class TestOmitTf extends LuceneTe
     // keep things constant
     d = new Document();
         
-    // Reverese
+    // Reverse
     f1.setOmitTermFreqAndPositions(true);
     d.add(f1);
         
     f2.setOmitTermFreqAndPositions(false);        
     d.add(f2);
         
+    Random rnd = newRandom();
+
     writer.addDocument(d);
+    FlexTestUtil.verifyFlexVsPreFlex(rnd, writer);
+
     // force merge
     writer.optimize();
     // flush
     writer.close();
     _TestUtil.checkIndex(ram);
 
+    FlexTestUtil.verifyFlexVsPreFlex(rnd, ram);
+
     SegmentReader reader = SegmentReader.getOnlySegmentReader(ram);
     FieldInfos fi = reader.fieldInfos();
     assertTrue("OmitTermFreqAndPositions field bit should be set.", fi.fieldInfo("f1").omitTermFreqAndPositions);
@@ -144,8 +145,12 @@ public class TestOmitTf extends LuceneTe
     for(int i=0;i<30;i++)
       writer.addDocument(d);
         
+    Random rnd = newRandom();
+    FlexTestUtil.verifyFlexVsPreFlex(rnd, writer);
+
     // force merge
     writer.optimize();
+    FlexTestUtil.verifyFlexVsPreFlex(rnd, writer);
     // flush
     writer.close();
 
@@ -289,6 +294,15 @@ public class TestOmitTf extends LuceneTe
     TermQuery q3 = new TermQuery(c);
     TermQuery q4 = new TermQuery(d);
 
+    PhraseQuery pq = new PhraseQuery();
+    pq.add(a);
+    pq.add(c);
+    try {
+      searcher.search(pq, 10);
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
         
     searcher.search(q1,
                     new CountingHitCollector() {
@@ -380,7 +394,7 @@ public class TestOmitTf extends LuceneTe
                         super.collect(doc);
                       }
                     });
-    assertTrue(15 == CountingHitCollector.getCount());
+    assertEquals(15, CountingHitCollector.getCount());
         
     searcher.close();     
     dir.close();
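
The new PhraseQuery block above checks a behavioral contract: phrase
queries need position data, so running one against a field indexed with
omitTermFreqAndPositions is expected to fail with IllegalStateException.
A sketch of how calling code might surface that failure more
descriptively (PhraseSearchGuard and its message are hypothetical, not
Lucene API):

    import java.io.IOException;
    import org.apache.lucene.search.PhraseQuery;
    import org.apache.lucene.search.Searcher;
    import org.apache.lucene.search.TopDocs;

    public final class PhraseSearchGuard {
      // Hypothetical wrapper: translate the low-level failure into a
      // message that names the likely cause.
      public static TopDocs phraseSearch(Searcher searcher, PhraseQuery pq, int n)
          throws IOException {
        try {
          return searcher.search(pq, n);
        } catch (IllegalStateException ise) {
          throw new IllegalArgumentException(
              "phrase query needs positions; was the field indexed with "
              + "omitTermFreqAndPositions? " + pq, ise);
        }
      }
    }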

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPayloads.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPayloads.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPayloads.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPayloads.java Tue Apr  6 19:19:27 2010
@@ -39,7 +39,8 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util._TestUtil;
@@ -99,7 +100,7 @@ public class TestPayloads extends Lucene
     // payload bit in the FieldInfo
     public void testPayloadFieldBit() throws Exception {
         rnd = newRandom();
-        Directory ram = new RAMDirectory();
+        Directory ram = new MockRAMDirectory();
         PayloadAnalyzer analyzer = new PayloadAnalyzer();
         IndexWriter writer = new IndexWriter(ram, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
         Document d = new Document();
@@ -139,6 +140,9 @@ public class TestPayloads extends Lucene
         analyzer.setPayloadData("f2", "somedata".getBytes(), 0, 1);
         analyzer.setPayloadData("f3", "somedata".getBytes(), 0, 3);
         writer.addDocument(d);
+
+        FlexTestUtil.verifyFlexVsPreFlex(rnd, writer);
+
         // force merge
         writer.optimize();
         // flush
@@ -149,14 +153,15 @@ public class TestPayloads extends Lucene
         assertFalse("Payload field bit should not be set.", fi.fieldInfo("f1").storePayloads);
         assertTrue("Payload field bit should be set.", fi.fieldInfo("f2").storePayloads);
         assertTrue("Payload field bit should be set.", fi.fieldInfo("f3").storePayloads);
-        reader.close();        
+        reader.close();
+        FlexTestUtil.verifyFlexVsPreFlex(rnd, ram);
     }
 
     // Tests if payloads are correctly stored and loaded using both RamDirectory and FSDirectory
     public void testPayloadsEncoding() throws Exception {
         rnd = newRandom();
         // first perform the test using a RAMDirectory
-        Directory dir = new RAMDirectory();
+        Directory dir = new MockRAMDirectory();
         performTest(dir);
         
         // now use a FSDirectory and repeat same test
@@ -215,7 +220,9 @@ public class TestPayloads extends Lucene
             writer.addDocument(d);
         }
         
+        FlexTestUtil.verifyFlexVsPreFlex(rnd, writer);
         writer.optimize();
+        FlexTestUtil.verifyFlexVsPreFlex(rnd, writer);
         // flush
         writer.close();
         
@@ -260,11 +267,17 @@ public class TestPayloads extends Lucene
         TermPositions tp = reader.termPositions(terms[0]);
         tp.next();
         tp.nextPosition();
+        // NOTE: the prior rev of this test failed to call
+        // next() first here:
+        tp.next();
         // now we don't read this payload
         tp.nextPosition();
         assertEquals("Wrong payload length.", 1, tp.getPayloadLength());
         byte[] payload = tp.getPayload(null, 0);
         assertEquals(payload[0], payloadData[numTerms]);
+        // NOTE: the prior rev of this test failed to call
+        // next() first here:
+        tp.next();
         tp.nextPosition();
         
         // we don't read this payload and skip to a different document
@@ -321,7 +334,9 @@ public class TestPayloads extends Lucene
         writer.addDocument(d);
 
         
+        FlexTestUtil.verifyFlexVsPreFlex(rnd, writer);
         writer.optimize();
+        FlexTestUtil.verifyFlexVsPreFlex(rnd, writer);
         // flush
         writer.close();
         
@@ -469,7 +484,7 @@ public class TestPayloads extends Lucene
         final int numDocs = 50;
         final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
         
-        Directory dir = new RAMDirectory();
+        Directory dir = new MockRAMDirectory();
         final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
             TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
         final String field = "test";
@@ -563,13 +578,13 @@ public class TestPayloads extends Lucene
             }
         }
         
-        private UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result();
+        private BytesRef utf8Result = new BytesRef(10);
 
         synchronized String bytesToString(byte[] bytes) {
             String s = new String(bytes);
             UnicodeUtil.UTF16toUTF8(s, 0, s.length(), utf8Result);
             try {
-                return new String(utf8Result.result, 0, utf8Result.length, "UTF-8");
+                return new String(utf8Result.bytes, 0, utf8Result.length, "UTF-8");
             } catch (UnsupportedEncodingException uee) {
                 return null;
             }
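
On the UTF8Result-to-BytesRef change in bytesToString above: BytesRef
is just a (bytes, offset, length) slice, so the round trip is
behaviorally equivalent to this JDK-only sketch (the literal is an
arbitrary example):

    import java.io.UnsupportedEncodingException;

    public class Utf8RoundTrip {
      public static void main(String[] args) throws UnsupportedEncodingException {
        String s = "payload\u00e9";
        byte[] utf8 = s.getBytes("UTF-8");   // what UTF16toUTF8 writes into BytesRef.bytes
        String back = new String(utf8, 0, utf8.length, "UTF-8");
        System.out.println(s.equals(back));  // true: lossless round trip
      }
    }

The UnicodeUtil path exists so the destination buffer can be reused
across calls instead of allocating a fresh byte[] per string.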

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java Tue Apr  6 19:19:27 2010
@@ -18,9 +18,11 @@ package org.apache.lucene.index;
  */
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.codecs.CodecProvider;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -63,14 +65,16 @@ public class TestSegmentMerger extends L
   }
   
   public void testMerge() throws IOException {                             
-    SegmentMerger merger = new SegmentMerger(mergedDir, mergedSegment);
+    SegmentMerger merger = new SegmentMerger(mergedDir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, null, CodecProvider.getDefault());
     merger.add(reader1);
     merger.add(reader2);
     int docsMerged = merger.merge();
     merger.closeReaders();
     assertTrue(docsMerged == 2);
     //Should be able to open a new SegmentReader against the new directory
-    SegmentReader mergedReader = SegmentReader.get(true, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true,
+        -1, null, false, merger.hasProx(), merger.getCodec()), BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, null);
+
     assertTrue(mergedReader != null);
     assertTrue(mergedReader.numDocs() == 2);
     Document newDoc1 = mergedReader.document(0);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentReader.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentReader.java Tue Apr  6 19:19:27 2010
@@ -136,6 +136,9 @@ public class TestSegmentReader extends L
     TermPositions positions = reader.termPositions();
     assertTrue(positions != null);
     positions.seek(new Term(DocHelper.TEXT_FIELD_1_KEY, "field"));
+    // NOTE: the prior rev of this test failed to call
+    // next() first here:
+    assertTrue(positions.next());
     assertTrue(positions.doc() == 0);
     assertTrue(positions.nextPosition() >= 0);
   }    
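
The NOTE above encodes the TermPositions iteration contract: seek()
leaves the enum positioned before the first matching document, so
next() must return true before doc(), freq(), or nextPosition() mean
anything. A compact usage sketch of that contract (PositionsWalk is an
illustrative name; reader and t come from the caller):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TermPositions;

    public final class PositionsWalk {
      static void walk(IndexReader reader, Term t) throws IOException {
        TermPositions tp = reader.termPositions();
        try {
          tp.seek(t);                // positioned before the first match
          while (tp.next()) {        // must succeed before doc()/freq()
            int freq = tp.freq();
            for (int i = 0; i < freq; i++) {
              tp.nextPosition();     // valid only within the current doc
            }
          }
        } finally {
          tp.close();
        }
      }
    }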

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java Tue Apr  6 19:19:27 2010
@@ -56,13 +56,13 @@ public class TestSegmentTermDocs extends
     SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
     assertTrue(reader != null);
     assertEquals(indexDivisor, reader.getTermInfosIndexDivisor());
-    SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
-    segTermDocs.seek(new Term(DocHelper.TEXT_FIELD_2_KEY, "field"));
-    if (segTermDocs.next() == true)
-    {
-      int docId = segTermDocs.doc();
+    TermDocs termDocs = reader.termDocs();
+    assertTrue(termDocs != null);
+    termDocs.seek(new Term(DocHelper.TEXT_FIELD_2_KEY, "field"));
+    if (termDocs.next() == true) {
+      int docId = termDocs.doc();
       assertTrue(docId == 0);
-      int freq = segTermDocs.freq();
+      int freq = termDocs.freq();
       assertTrue(freq == 3);  
     }
     reader.close();
@@ -77,18 +77,20 @@ public class TestSegmentTermDocs extends
       //After adding the document, we should be able to read it back in
       SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
       assertTrue(reader != null);
-      SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
-      segTermDocs.seek(new Term("textField2", "bad"));
-      assertTrue(segTermDocs.next() == false);
+      TermDocs termDocs = reader.termDocs();
+      assertTrue(termDocs != null);
+      termDocs.seek(new Term("textField2", "bad"));
+      assertTrue(termDocs.next() == false);
       reader.close();
     }
     {
       //After adding the document, we should be able to read it back in
       SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
       assertTrue(reader != null);
-      SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
-      segTermDocs.seek(new Term("junk", "bad"));
-      assertTrue(segTermDocs.next() == false);
+      TermDocs termDocs = reader.termDocs();
+      assertTrue(termDocs != null);
+      termDocs.seek(new Term("junk", "bad"));
+      assertTrue(termDocs.next() == false);
       reader.close();
     }
   }
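
Both this file and TestSegmentTermEnum below move off the concrete
SegmentTermDocs/SegmentTermEnum classes onto the public enumerator
APIs. The flex TermsEnum that TestSegmentTermEnum adopts additionally
supports seeking by ord, as its rewrite exercises. A compact sketch of
that pattern (OrdSeekSketch is an illustrative name; assumes a
single-segment reader whose "content" field has at least one term, as
in the test):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    public final class OrdSeekSketch {
      static String firstTermViaOrd(IndexReader reader) throws IOException {
        TermsEnum te = reader.fields().terms("content").iterator();
        long firstOrd = -1;
        for (BytesRef term = te.next(); term != null; term = te.next()) {
          if (firstOrd == -1) {
            firstOrd = te.ord();     // remember the first term's ordinal
          }
        }
        te.seek(firstOrd);           // jump straight back by ordinal
        return te.term().utf8ToString();
      }
    }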

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java?rev=931278&r1=931277&r2=931278&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java Tue Apr  6 19:19:27 2010
@@ -67,14 +67,16 @@ public class TestSegmentTermEnum extends
     addDoc(writer, "aaa bbb");
     writer.close();
     SegmentReader reader = SegmentReader.getOnlySegmentReader(dir);
-    SegmentTermEnum termEnum = (SegmentTermEnum) reader.terms();
-    assertTrue(termEnum.next());
-    assertEquals("aaa", termEnum.term().text());
-    assertTrue(termEnum.next());
-    assertEquals("aaa", termEnum.prev().text());
-    assertEquals("bbb", termEnum.term().text());
-    assertFalse(termEnum.next());
-    assertEquals("bbb", termEnum.prev().text());
+    TermsEnum terms = reader.fields().terms("content").iterator();
+    assertNotNull(terms.next());
+    assertEquals("aaa", terms.term().utf8ToString());
+    assertNotNull(terms.next());
+    long ordB = terms.ord();
+    assertEquals("bbb", terms.term().utf8ToString());
+    assertNull(terms.next());
+
+    assertEquals(TermsEnum.SeekStatus.FOUND, terms.seek(ordB));
+    assertEquals("bbb", terms.term().utf8ToString());
   }
 
   private void verifyDocFreq()