Posted to commits@lucene.apache.org by ma...@apache.org on 2012/01/18 23:28:20 UTC

svn commit: r1233096 [8/13] - in /lucene/dev/branches/solrcloud: ./ dev-tools/eclipse/ dev-tools/idea/.idea/ dev-tools/idea/lucene/contrib/ dev-tools/idea/modules/analysis/kuromoji/ dev-tools/idea/solr/contrib/analysis-extras/ dev-tools/maven/modules/a...

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/Builder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/Builder.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/Builder.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/Builder.java Wed Jan 18 22:28:07 2012
@@ -19,7 +19,6 @@ package org.apache.lucene.util.fst;
 
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.RamUsageEstimator;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.fst.FST.INPUT_TYPE; // javadoc
 
@@ -165,8 +164,8 @@ public class Builder<T> {
   }
 
   /** Pass false to disable the array arc optimization
-   *  while building the FST.  This is necessary if
-   *  encoding a single arc may take more than 255 bytes. */
+   *  while building the FST; this will make the resulting
+   *  FST smaller but slower to traverse. */
   public void setAllowArrayArcs(boolean b) {
     fst.setAllowArrayArcs(b);
   }
@@ -290,54 +289,6 @@ public class Builder<T> {
     }
   }
 
-  private final IntsRef scratchIntsRef = new IntsRef(10);
-
-  public void add(BytesRef input, T output) throws IOException {
-    assert fst.getInputType() == FST.INPUT_TYPE.BYTE1;
-    scratchIntsRef.grow(input.length);
-    for(int i=0;i<input.length;i++) {
-      scratchIntsRef.ints[i] = input.bytes[i+input.offset] & 0xFF;
-    }
-    scratchIntsRef.length = input.length;
-    add(scratchIntsRef, output);
-  }
-
-  /** Sugar: adds the UTF32 codepoints from char[] slice.  FST
-   *  must be FST.INPUT_TYPE.BYTE4! */
-  public void add(char[] s, int offset, int length, T output) throws IOException {
-    assert fst.getInputType() == FST.INPUT_TYPE.BYTE4;
-    int charIdx = offset;
-    int intIdx = 0;
-    final int charLimit = offset + length;
-    while(charIdx < charLimit) {
-      scratchIntsRef.grow(intIdx+1);
-      final int utf32 = Character.codePointAt(s, charIdx);
-      scratchIntsRef.ints[intIdx] = utf32;
-      charIdx += Character.charCount(utf32);
-      intIdx++;
-    }
-    scratchIntsRef.length = intIdx;
-    add(scratchIntsRef, output);
-  }
-
-  /** Sugar: adds the UTF32 codepoints from CharSequence.  FST
-   *  must be FST.INPUT_TYPE.BYTE4! */
-  public void add(CharSequence s, T output) throws IOException {
-    assert fst.getInputType() == FST.INPUT_TYPE.BYTE4;
-    int charIdx = 0;
-    int intIdx = 0;
-    final int charLimit = s.length();
-    while(charIdx < charLimit) {
-      scratchIntsRef.grow(intIdx+1);
-      final int utf32 = Character.codePointAt(s, charIdx);
-      scratchIntsRef.ints[intIdx] = utf32;
-      charIdx += Character.charCount(utf32);
-      intIdx++;
-    }
-    scratchIntsRef.length = intIdx;
-    add(scratchIntsRef, output);
-  }
-
   // for debugging
   /*
   private String toString(BytesRef b) {

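For context, a minimal sketch (not part of this commit) of what callers do now that the BytesRef/char[]/CharSequence sugar has moved out of Builder: convert the input to an IntsRef with the Util helpers added later in this commit, then call the remaining add(IntsRef, T). The two-argument Builder constructor and PositiveIntOutputs.getSingleton(boolean) are assumed to match the trunk API at this revision; imports from org.apache.lucene.util and org.apache.lucene.util.fst are omitted.

    // Sketch only: build a small BYTE1 FST without the removed sugar methods.
    static FST<Long> buildExample() throws IOException {
      PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true); // assumed factory signature
      Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
      IntsRef scratch = new IntsRef();
      // inputs must still be added in sorted order
      builder.add(Util.toIntsRef(new BytesRef("bar"), scratch), outputs.get(7));
      builder.add(Util.toIntsRef(new BytesRef("foo"), scratch), outputs.get(42));
      return builder.finish();
    }
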
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/FST.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/FST.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/FST.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/FST.java Wed Jan 18 22:28:07 2012
@@ -92,7 +92,10 @@ public class FST<T> {
   /** Changed numBytesPerArc for array'd case from byte to int. */
   private final static int VERSION_INT_NUM_BYTES_PER_ARC = 1;
 
-  private final static int VERSION_CURRENT = VERSION_INT_NUM_BYTES_PER_ARC;
+  /** Write BYTE2 labels as 2-byte short, not vInt. */
+  private final static int VERSION_SHORT_BYTE2_LABELS = 2;
+
+  private final static int VERSION_CURRENT = VERSION_SHORT_BYTE2_LABELS;
 
   // Never serialized; just used to represent the virtual
   // final node w/ no arcs:
@@ -107,7 +110,8 @@ public class FST<T> {
   T emptyOutput;
   private byte[] emptyOutputBytes;
 
-  private byte[] bytes;
+  // Not private to avoid synthetic access$NNN methods:
+  byte[] bytes;
   int byteUpto = 0;
 
   private int startNode = -1;
@@ -199,7 +203,9 @@ public class FST<T> {
   public FST(DataInput in, Outputs<T> outputs) throws IOException {
     this.outputs = outputs;
     writer = null;
-    CodecUtil.checkHeader(in, FILE_FORMAT_NAME, VERSION_INT_NUM_BYTES_PER_ARC, VERSION_INT_NUM_BYTES_PER_ARC);
+    // NOTE: only reads most recent format; we don't have
+    // back-compat promise for FSTs (they are experimental):
+    CodecUtil.checkHeader(in, FILE_FORMAT_NAME, VERSION_SHORT_BYTE2_LABELS, VERSION_SHORT_BYTE2_LABELS);
     if (in.readByte() == 1) {
       // accepts empty string
       int numBytes = in.readVInt();
@@ -389,7 +395,7 @@ public class FST<T> {
       writer.writeByte((byte) v);
     } else if (inputType == INPUT_TYPE.BYTE2) {
       assert v <= 65535: "v=" + v;
-      writer.writeVInt(v);
+      writer.writeShort((short) v);
     } else {
       //writeInt(v);
       writer.writeVInt(v);
@@ -399,7 +405,11 @@ public class FST<T> {
   int readLabel(DataInput in) throws IOException {
     final int v;
     if (inputType == INPUT_TYPE.BYTE1) {
+      // Unsigned byte:
       v = in.readByte()&0xFF;
+    } else if (inputType == INPUT_TYPE.BYTE2) {
+      // Unsigned short:
+      v = in.readShort()&0xFFFF;
     } else { 
       v = in.readVInt();
     }

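The BYTE2 change above swaps a vInt for a fixed two-byte short, and readLabel masks the value back into the unsigned 0..65535 range; since only VERSION_SHORT_BYTE2_LABELS is accepted on read, previously serialized FSTs have to be rebuilt. A tiny round-trip sketch using the same writeShort/readShort calls; out and in stand for a paired DataOutput/DataInput over the same bytes:

    // Sketch: how a BYTE2 label is encoded and decoded after this change.
    static void byte2LabelRoundTrip(DataOutput out, DataInput in) throws IOException {
      int label = 40000;                      // any value in 0..65535 is legal for BYTE2
      out.writeShort((short) label);          // fixed 2 bytes, previously a vInt
      int decoded = in.readShort() & 0xFFFF;  // unsigned mask restores 40000
      assert decoded == label;
    }
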
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/PositiveIntOutputs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/PositiveIntOutputs.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/PositiveIntOutputs.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/PositiveIntOutputs.java Wed Jan 18 22:28:07 2012
@@ -25,8 +25,10 @@ import org.apache.lucene.store.DataOutpu
 /**
  * Output is a long, for each input term.  NOTE: the
  * resulting FST is not guaranteed to be minimal!  See
- * {@link Builder}.  You cannot store 0 output with this
- * (that's reserved to mean "no output")!
+ * {@link Builder}.  You must use {@link #get} to obtain the
+ * output for a given long value -- do not use autoboxing
+ * nor create your own Long instance (the value 0
+ * must map to the {@link #getNoOutput} singleton).
  *
  * @lucene.experimental
  */

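The reworded javadoc is about object identity: 0 is reserved as the "no output" sentinel, and the FST code compares against that singleton by reference (see the arc.output != NO_OUTPUT checks in Util.get below), so outputs should come from get()/getNoOutput() rather than from autoboxing or a hand-built Long. A hedged sketch, assuming getSingleton(boolean) is the factory method at this revision:

    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
    Long seventeen = outputs.get(17);        // obtained from the Outputs -- correct
    Long none = outputs.getNoOutput();       // the singleton that the value 0 maps to
    // Long wrong = 0L;                      // autoboxed zero is not the getNoOutput() singleton
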
Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/Util.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/Util.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/Util.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/fst/Util.java Wed Jan 18 22:28:07 2012
@@ -31,10 +31,8 @@ public final class Util {
   }
 
   /** Looks up the output for this input, or null if the
-   *  input is not accepted. FST must be
-   *  INPUT_TYPE.BYTE4. */
+   *  input is not accepted. */
   public static<T> T get(FST<T> fst, IntsRef input) throws IOException {
-    assert fst.inputType == FST.INPUT_TYPE.BYTE4;
 
     // TODO: would be nice not to alloc this on every lookup
     final FST.Arc<T> arc = fst.getFirstArc(new FST.Arc<T>());
@@ -59,77 +57,7 @@ public final class Util {
     }
   }
 
-  /** Logically casts input to UTF32 ints then looks up the output
-   *  or null if the input is not accepted.  FST must be
-   *  INPUT_TYPE.BYTE4.  */
-  public static<T> T get(FST<T> fst, char[] input, int offset, int length) throws IOException {
-    assert fst.inputType == FST.INPUT_TYPE.BYTE4;
-
-    // TODO: would be nice not to alloc this on every lookup
-    final FST.Arc<T> arc = fst.getFirstArc(new FST.Arc<T>());
-
-    int charIdx = offset;
-    final int charLimit = offset + length;
-
-    // Accumulate output as we go
-    final T NO_OUTPUT = fst.outputs.getNoOutput();
-    T output = NO_OUTPUT;
-    while(charIdx < charLimit) {
-      final int utf32 = Character.codePointAt(input, charIdx);
-      charIdx += Character.charCount(utf32);
-
-      if (fst.findTargetArc(utf32, arc, arc) == null) {
-        return null;
-      } else if (arc.output != NO_OUTPUT) {
-        output = fst.outputs.add(output, arc.output);
-      }
-    }
-
-    if (fst.findTargetArc(FST.END_LABEL, arc, arc) == null) {
-      return null;
-    } else if (arc.output != NO_OUTPUT) {
-      return fst.outputs.add(output, arc.output);
-    } else {
-      return output;
-    }
-  }
-
-
-  /** Logically casts input to UTF32 ints then looks up the output
-   *  or null if the input is not accepted.  FST must be
-   *  INPUT_TYPE.BYTE4.  */
-  public static<T> T get(FST<T> fst, CharSequence input) throws IOException {
-    assert fst.inputType == FST.INPUT_TYPE.BYTE4;
-    
-    // TODO: would be nice not to alloc this on every lookup
-    final FST.Arc<T> arc = fst.getFirstArc(new FST.Arc<T>());
-
-    int charIdx = 0;
-    final int charLimit = input.length();
-
-    // Accumulate output as we go
-    final T NO_OUTPUT = fst.outputs.getNoOutput();
-    T output = NO_OUTPUT;
-
-    while(charIdx < charLimit) {
-      final int utf32 = Character.codePointAt(input, charIdx);
-      charIdx += Character.charCount(utf32);
-
-      if (fst.findTargetArc(utf32, arc, arc) == null) {
-        return null;
-      } else if (arc.output != NO_OUTPUT) {
-        output = fst.outputs.add(output, arc.output);
-      }
-    }
-
-    if (fst.findTargetArc(FST.END_LABEL, arc, arc) == null) {
-      return null;
-    } else if (arc.output != NO_OUTPUT) {
-      return fst.outputs.add(output, arc.output);
-    } else {
-      return output;
-    }
-  }
+  // TODO: maybe a CharsRef version for BYTE2
 
   /** Looks up the output for this input, or null if the
    *  input is not accepted */
@@ -381,4 +309,51 @@ public final class Util {
       return "0x" + Integer.toHexString(label);
     }
   }
+
+  /** Decodes the Unicode codepoints from the provided
+   *  CharSequence and places them in the provided scratch
+   *  IntsRef, which must not be null, returning it. */
+  public static IntsRef toUTF32(CharSequence s, IntsRef scratch) {
+    int charIdx = 0;
+    int intIdx = 0;
+    final int charLimit = s.length();
+    while(charIdx < charLimit) {
+      scratch.grow(intIdx+1);
+      final int utf32 = Character.codePointAt(s, charIdx);
+      scratch.ints[intIdx] = utf32;
+      charIdx += Character.charCount(utf32);
+      intIdx++;
+    }
+    scratch.length = intIdx;
+    return scratch;
+  }
+
+  /** Decodes the Unicode codepoints from the provided
+   *  char[] and places them in the provided scratch
+   *  IntsRef, which must not be null, returning it. */
+  public static IntsRef toUTF32(char[] s, int offset, int length, IntsRef scratch) {
+    int charIdx = offset;
+    int intIdx = 0;
+    final int charLimit = offset + length;
+    while(charIdx < charLimit) {
+      scratch.grow(intIdx+1);
+      final int utf32 = Character.codePointAt(s, charIdx);
+      scratch.ints[intIdx] = utf32;
+      charIdx += Character.charCount(utf32);
+      intIdx++;
+    }
+    scratch.length = intIdx;
+    return scratch;
+  }
+
+  /** Just takes unsigned byte values from the BytesRef and
+   *  converts into an IntsRef. */
+  public static IntsRef toIntsRef(BytesRef input, IntsRef scratch) {
+    scratch.grow(input.length);
+    for(int i=0;i<input.length;i++) {
+      scratch.ints[i] = input.bytes[i+input.offset] & 0xFF;
+    }
+    scratch.length = input.length;
+    return scratch;
+  }
 }

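With the input-type assertion dropped from get(FST, IntsRef) and the conversions factored into toUTF32/toIntsRef, the lookups that used to have dedicated char[]/CharSequence overloads become short two-liners. A sketch, assuming a long-output FST named fst4 built with INPUT_TYPE.BYTE4 and fst1 built with INPUT_TYPE.BYTE1:

    IntsRef scratch = new IntsRef();
    Long a = Util.get(fst4, Util.toUTF32("abc", scratch));                  // UTF-32 code points
    Long b = Util.get(fst1, Util.toIntsRef(new BytesRef("abc"), scratch));  // unsigned bytes
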
Modified: lucene/dev/branches/solrcloud/lucene/src/site/build/site/fileformats.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/site/build/site/fileformats.html?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/site/build/site/fileformats.html (original)
+++ lucene/dev/branches/solrcloud/lucene/src/site/build/site/fileformats.html Wed Jan 18 22:28:07 2012
@@ -1740,10 +1740,6 @@ document.write("Last Published: " + docu
                             without term vectors.
                         </li>
                         
-<li>If the third lowest-order bit is set (0x04), term positions are stored with the term vectors.</li>
-                        
-<li>If the fourth lowest-order bit is set (0x08), term offsets are stored with the term vectors.</li>
-                        
 <li>If the fifth lowest-order bit is set (0x10), norms are omitted for the indexed field.</li>
                         
 <li>If the sixth lowest-order bit is set (0x20), payloads are stored for the indexed field.</li>
@@ -1908,7 +1904,7 @@ document.write("Last Published: " + docu
 </li>
                 
 </ol>
-<a name="N106F0"></a><a name="Term Dictionary"></a>
+<a name="N106EA"></a><a name="Term Dictionary"></a>
 <h3 class="boxed">Term Dictionary</h3>
 <p>
                     The term dictionary is represented as two files:
@@ -2100,7 +2096,7 @@ document.write("Last Published: " + docu
 </li>
                 
 </ol>
-<a name="N10774"></a><a name="Frequencies"></a>
+<a name="N1076E"></a><a name="Frequencies"></a>
 <h3 class="boxed">Frequencies</h3>
 <p>
                     The .frq file contains the lists of documents
@@ -2228,7 +2224,7 @@ document.write("Last Published: " + docu
                    entry in level-1. In the example has entry 15 on level 1 a pointer to entry 15 on level 0 and entry 31 on level 1 a pointer
                    to entry 31 on level 0.                   
                 </p>
-<a name="N107FC"></a><a name="Positions"></a>
+<a name="N107F6"></a><a name="Positions"></a>
 <h3 class="boxed">Positions</h3>
 <p>
                     The .prx file contains the lists of positions that
@@ -2298,7 +2294,7 @@ document.write("Last Published: " + docu
                     Payload. If PayloadLength is not stored, then this Payload has the same
                     length as the Payload at the previous position.
                 </p>
-<a name="N10838"></a><a name="Normalization Factors"></a>
+<a name="N10832"></a><a name="Normalization Factors"></a>
 <h3 class="boxed">Normalization Factors</h3>
 <p>There's a single .nrm file containing all norms:
                 </p>
@@ -2378,7 +2374,7 @@ document.write("Last Published: " + docu
                 </p>
 <p>Separate norm files are created (when adequate) for both compound and non compound segments.
                 </p>
-<a name="N10889"></a><a name="Term Vectors"></a>
+<a name="N10883"></a><a name="Term Vectors"></a>
 <h3 class="boxed">Term Vectors</h3>
 <p>
 		  Term Vector support is an optional on a field by
@@ -2514,7 +2510,7 @@ document.write("Last Published: " + docu
 </li>
                 
 </ol>
-<a name="N10925"></a><a name="Deleted Documents"></a>
+<a name="N1091F"></a><a name="Deleted Documents"></a>
 <h3 class="boxed">Deleted Documents</h3>
 <p>The .del file is
                     optional, and only exists when a segment contains deletions.
@@ -2578,7 +2574,7 @@ document.write("Last Published: " + docu
 </div>
 
         
-<a name="N1095F"></a><a name="Limitations"></a>
+<a name="N10959"></a><a name="Limitations"></a>
 <h2 class="boxed">Limitations</h2>
 <div class="section">
 <p>

Modified: lucene/dev/branches/solrcloud/lucene/src/site/src/documentation/content/xdocs/fileformats.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/site/src/documentation/content/xdocs/fileformats.xml?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/site/src/documentation/content/xdocs/fileformats.xml (original)
+++ lucene/dev/branches/solrcloud/lucene/src/site/src/documentation/content/xdocs/fileformats.xml Wed Jan 18 22:28:07 2012
@@ -1216,8 +1216,6 @@
                             bit is one for fields that have term vectors stored, and zero for fields
                             without term vectors.
                         </li>
-                        <li>If the third lowest-order bit is set (0x04), term positions are stored with the term vectors.</li>
-                        <li>If the fourth lowest-order bit is set (0x08), term offsets are stored with the term vectors.</li>
                         <li>If the fifth lowest-order bit is set (0x10), norms are omitted for the indexed field.</li>
                         <li>If the sixth lowest-order bit is set (0x20), payloads are stored for the indexed field.</li>
                         <li>If the seventh lowest-order bit is set (0x40), term frequencies and positions omitted for the indexed field.</li>

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWCodec.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWCodec.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWCodec.java Wed Jan 18 22:28:07 2012
@@ -17,8 +17,10 @@ package org.apache.lucene.codecs.preflex
  * limitations under the License.
  */
 
+import org.apache.lucene.codecs.FieldInfosFormat;
 import org.apache.lucene.codecs.NormsFormat;
 import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.lucene3x.Lucene3xCodec;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -29,6 +31,8 @@ import org.apache.lucene.util.LuceneTest
 public class PreFlexRWCodec extends Lucene3xCodec {
   private final PostingsFormat postings = new PreFlexRWPostingsFormat();
   private final NormsFormat norms = new PreFlexRWNormsFormat();
+  private final FieldInfosFormat fieldInfos = new PreFlexRWFieldInfosFormat();
+  private final TermVectorsFormat termVectors = new PreFlexRWTermVectorsFormat();
   
   @Override
   public PostingsFormat postingsFormat() {
@@ -47,4 +51,22 @@ public class PreFlexRWCodec extends Luce
       return super.normsFormat();
     }
   }
+
+  @Override
+  public FieldInfosFormat fieldInfosFormat() {
+    if (LuceneTestCase.PREFLEX_IMPERSONATION_IS_ACTIVE) {
+      return fieldInfos;
+    } else {
+      return super.fieldInfosFormat();
+    }
+  }
+
+  @Override
+  public TermVectorsFormat termVectorsFormat() {
+    if (LuceneTestCase.PREFLEX_IMPERSONATION_IS_ACTIVE) {
+      return termVectors;
+    } else {
+      return super.termVectorsFormat();
+    }
+  }
 }

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWNormsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWNormsFormat.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWNormsFormat.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWNormsFormat.java Wed Jan 18 22:28:07 2012
@@ -21,11 +21,15 @@ import org.apache.lucene.codecs.PerDocCo
 import org.apache.lucene.codecs.lucene3x.Lucene3xNormsFormat;
 import org.apache.lucene.index.PerDocWriteState;
 
+/**
+ * @lucene.internal
+ * @lucene.experimental
+ */
 public class PreFlexRWNormsFormat extends Lucene3xNormsFormat {
 
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new PreFlexNormsConsumer(state.directory, state.segmentName, state.context);
+    return new PreFlexRWNormsConsumer(state.directory, state.segmentName, state.context);
   }
 
 }

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWPostingsFormat.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWPostingsFormat.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/preflexrw/PreFlexRWPostingsFormat.java Wed Jan 18 22:28:07 2012
@@ -41,7 +41,7 @@ public class PreFlexRWPostingsFormat ext
   
   @Override
   public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
-    return new PreFlexFieldsWriter(state);
+    return new PreFlexRWFieldsWriter(state);
   }
 
   @Override

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java Wed Jan 18 22:28:07 2012
@@ -37,6 +37,7 @@ import org.apache.lucene.codecs.TermStat
 import org.apache.lucene.codecs.TermsConsumer;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldsEnum;
 import org.apache.lucene.index.IndexFileNames;
@@ -197,6 +198,9 @@ public class RAMOnlyPostingsFormat exten
 
     @Override
     public TermsConsumer addField(FieldInfo field) {
+      if (field.indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) {
+        throw new UnsupportedOperationException("this codec cannot index offsets");
+      }
       RAMField ramField = new RAMField(field.name);
       postings.fieldToTerms.put(field.name, ramField);
       termsConsumer.reset(ramField);
@@ -265,7 +269,9 @@ public class RAMOnlyPostingsFormat exten
     }
 
     @Override
-    public void addPosition(int position, BytesRef payload) {
+    public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) {
+      assert startOffset == -1;
+      assert endOffset == -1;
       current.positions[posUpto] = position;
       if (payload != null && payload.length > 0) {
         if (current.payloads == null) {
@@ -388,7 +394,10 @@ public class RAMOnlyPostingsFormat exten
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) {
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, boolean needsOffsets) {
+      if (needsOffsets) {
+        return null;
+      }
       return new RAMDocsAndPositionsEnum(ramField.termToDocs.get(current), liveDocs);
     }
   }
@@ -494,6 +503,16 @@ public class RAMOnlyPostingsFormat exten
     }
 
     @Override
+    public int startOffset() {
+      return -1;
+    }
+
+    @Override
+    public int endOffset() {
+      return -1;
+    }
+
+    @Override
     public boolean hasPayload() {
       return current.payloads != null && current.payloads[posUpto-1] != null;
     }

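These hunks show the offsets-aware postings API arriving in the test codec: addPosition now carries startOffset/endOffset, docsAndPositions takes a needsOffsets flag and returns null when offsets cannot be supplied, and the enum reports -1 from startOffset()/endOffset(). The consumer-side contract, mirroring the fallback _TestUtil uses later in this commit (termsEnum and liveDocs stand for an existing TermsEnum and its live-docs Bits):

    // Sketch: ask for offsets, degrade to positions-only when the codec
    // (such as RAMOnlyPostingsFormat) cannot provide them.
    DocsAndPositionsEnum dp = termsEnum.docsAndPositions(liveDocs, null, true);
    if (dp == null) {
      dp = termsEnum.docsAndPositions(liveDocs, null, false);
    }
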
Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/DocHelper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/DocHelper.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/DocHelper.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/DocHelper.java Wed Jan 18 22:28:07 2012
@@ -26,13 +26,13 @@ import java.util.Random;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.document.BinaryField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.search.similarities.SimilarityProvider;
 import org.apache.lucene.store.Directory;
 
@@ -197,7 +197,7 @@ class DocHelper {
       LAZY_FIELD_BINARY_BYTES = "These are some binary field bytes".getBytes("UTF8");
     } catch (UnsupportedEncodingException e) {
     }
-    lazyFieldBinary = new BinaryField(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES);
+    lazyFieldBinary = new StoredField(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES);
     fields[fields.length - 2] = lazyFieldBinary;
     LARGE_LAZY_FIELD_TEXT = buffer.toString();
     largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, customType);

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java Wed Jan 18 22:28:07 2012
@@ -25,10 +25,10 @@ import java.util.Random;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.document.DocValuesField;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexWriter; // javadoc
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -122,6 +122,10 @@ public class RandomIndexWriter implement
    * @see IndexWriter#addDocument(Iterable)
    */
   public <T extends IndexableField> void addDocument(final Iterable<T> doc) throws IOException {
+    addDocument(doc, w.getAnalyzer());
+  }
+
+  public <T extends IndexableField> void addDocument(final Iterable<T> doc, Analyzer a) throws IOException {
     if (doDocValues && doc instanceof Document) {
       randomPerDocFieldValues(r, (Document) doc);
     }
@@ -157,9 +161,9 @@ public class RandomIndexWriter implement
             }
           };
         }
-        });
+        }, a);
     } else {
-      w.addDocument(doc);
+      w.addDocument(doc, a);
     }
     
     maybeCommit();
@@ -172,7 +176,10 @@ public class RandomIndexWriter implement
     String name = "random_" + type.name() + "" + docValuesFieldPrefix;
     if ("Lucene3x".equals(codec.getName()) || doc.getField(name) != null)
         return;
-    DocValuesField docValuesField = new DocValuesField(name);
+    FieldType ft = new FieldType();
+    ft.setDocValueType(type);
+    ft.freeze();
+    final Field f;
     switch (type) {
     case BYTES_FIXED_DEREF:
     case BYTES_FIXED_STRAIGHT:
@@ -186,40 +193,38 @@ public class RandomIndexWriter implement
         fixedRef.grow(fixedBytesLength);
         fixedRef.length = fixedBytesLength;
       }
-      docValuesField.setBytes(fixedRef, type);
+      f = new Field(name, fixedRef, ft);
       break;
     case BYTES_VAR_DEREF:
     case BYTES_VAR_STRAIGHT:
     case BYTES_VAR_SORTED:
-      BytesRef ref = new BytesRef(_TestUtil.randomUnicodeString(random, 200));
-      docValuesField.setBytes(ref, type);
+      f = new Field(name, new BytesRef(_TestUtil.randomUnicodeString(random, 200)), ft);
       break;
     case FLOAT_32:
-      docValuesField.setFloat(random.nextFloat());
+      f = new Field(name, random.nextFloat(), ft);
       break;
     case FLOAT_64:
-      docValuesField.setFloat(random.nextDouble());
+      f = new Field(name, random.nextDouble(), ft);
       break;
     case VAR_INTS:
-      docValuesField.setInt(random.nextLong());
+      f = new Field(name, random.nextLong(), ft);
       break;
     case FIXED_INTS_16:
-      docValuesField.setInt(random.nextInt(Short.MAX_VALUE));
+      f = new Field(name, random.nextInt(Short.MAX_VALUE), ft);
       break;
     case FIXED_INTS_32:
-      docValuesField.setInt(random.nextInt());
+      f = new Field(name, random.nextInt(), ft);
       break;
     case FIXED_INTS_64:
-      docValuesField.setInt(random.nextLong());
+      f = new Field(name, random.nextLong(), ft);
       break;
     case FIXED_INTS_8:
-      docValuesField.setInt(random.nextInt(128));
+      f = new Field(name, random.nextInt(128), ft);
       break;
     default:
       throw new IllegalArgumentException("no such type: " + type);
     }
-
-    doc.add(docValuesField);
+    doc.add(f);
   }
 
   private void maybeCommit() throws IOException {

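The doc values randomization above replaces the setter-style DocValuesField with plain Field instances whose FieldType carries the doc values type. Outside the test framework the same pattern looks roughly like this, using only the calls that appear in the hunk (setDocValueType, freeze, and the value-plus-FieldType Field constructors):

    // Sketch: adding a fixed-width integer doc values field this way.
    FieldType ft = new FieldType();
    ft.setDocValueType(DocValues.Type.FIXED_INTS_32);
    ft.freeze();
    Document doc = new Document();
    doc.add(new Field("dv", 42, ft));
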
Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/store/MockDirectoryWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/store/MockDirectoryWrapper.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/store/MockDirectoryWrapper.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/store/MockDirectoryWrapper.java Wed Jan 18 22:28:07 2012
@@ -68,6 +68,7 @@ public class MockDirectoryWrapper extend
   boolean noDeleteOpenFile = true;
   boolean preventDoubleWrite = true;
   boolean checkIndexOnClose = true;
+  boolean crossCheckTermVectorsOnClose = true;
   boolean trackDiskUsage = false;
   private Set<String> unSyncedFiles;
   private Set<String> createdFiles;
@@ -310,6 +311,15 @@ public class MockDirectoryWrapper extend
   public boolean getCheckIndexOnClose() {
     return checkIndexOnClose;
   }
+
+  public void setCrossCheckTermVectorsOnClose(boolean value) {
+    this.crossCheckTermVectorsOnClose = value;
+  }
+
+  public boolean getCrossCheckTermVectorsOnClose() {
+    return crossCheckTermVectorsOnClose;
+  }
+
   /**
    * If 0.0, no exceptions will be thrown.  Else this should
    * be a double 0.0 - 1.0.  We will randomly throw an
@@ -557,7 +567,7 @@ public class MockDirectoryWrapper extend
         if (LuceneTestCase.VERBOSE) {
           System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex");
         } 
-        _TestUtil.checkIndex(this);
+        _TestUtil.checkIndex(this, crossCheckTermVectorsOnClose);
 
         if (assertNoUnreferencedFilesOnClose) {
           // now look for unreferenced files:

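The new crossCheckTermVectorsOnClose flag lets a test skip the term vector cross-check that CheckIndex would otherwise perform when the wrapped directory is closed. A hedged usage sketch, constructing the wrapper directly (in practice tests usually obtain it from the LuceneTestCase helpers; random is the test's Random):

    MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
    dir.setCrossCheckTermVectorsOnClose(false);  // CheckIndex still runs on close, minus the cross-check
    // ... index and search as usual ...
    dir.close();
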
Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java Wed Jan 18 22:28:07 2012
@@ -914,12 +914,14 @@ public abstract class LuceneTestCase ext
       }
     }
     if (r.nextBoolean()) {
+      int maxNumThreadStates = rarely(r) ? _TestUtil.nextInt(r, 5, 20) // crazy value
+          : _TestUtil.nextInt(r, 1, 4); // reasonable value
       if (rarely(r)) {
-        // crazy value
-        c.setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(_TestUtil.nextInt(r, 5, 20)));
+        // random thread pool
+        c.setIndexerThreadPool(new RandomDocumentsWriterPerThreadPool(maxNumThreadStates, r));
       } else {
-        // reasonable value
-        c.setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(_TestUtil.nextInt(r, 1, 4)));
+        // random thread pool
+        c.setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(maxNumThreadStates));
       }
     }
 
@@ -1106,6 +1108,10 @@ public abstract class LuceneTestCase ext
       return new Field(name, value, type);
     }
 
+    // TODO: once all core & test codecs can index
+    // offsets, sometimes randomly turn on offsets if we are
+    // already indexing positions...
+
     FieldType newType = new FieldType(type);
     if (!newType.stored() && random.nextBoolean()) {
       newType.setStored(true); // randomly store it

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/_TestUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/_TestUtil.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/_TestUtil.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/_TestUtil.java Wed Jan 18 22:28:07 2012
@@ -155,8 +155,13 @@ public class _TestUtil {
    *  issues are hit, a RuntimeException is thrown; else,
    *  true is returned. */
   public static CheckIndex.Status checkIndex(Directory dir) throws IOException {
+    return checkIndex(dir, true);
+  }
+
+  public static CheckIndex.Status checkIndex(Directory dir, boolean crossCheckTermVectors) throws IOException {
     ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
     CheckIndex checker = new CheckIndex(dir);
+    checker.setCrossCheckTermVectors(crossCheckTermVectors);
     checker.setInfoStream(new PrintStream(bos), false);
     CheckIndex.Status indexStatus = checker.checkIndex(null);
     if (indexStatus == null || indexStatus.clean == false) {
@@ -567,7 +572,10 @@ public class _TestUtil {
     if (random.nextBoolean()) {
       if (random.nextBoolean()) {
         // TODO: cast re-use to D&PE if we can...?
-        final DocsAndPositionsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null);
+        DocsAndPositionsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, true);
+        if (docsAndPositions == null) {
+          docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, false);
+        }
         if (docsAndPositions != null) {
           return docsAndPositions;
         }
@@ -586,7 +594,10 @@ public class _TestUtil {
     if (random.nextBoolean()) {
       if (random.nextBoolean()) {
         // TODO: cast re-use to D&PE if we can...?
-        final DocsAndPositionsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null);
+        DocsAndPositionsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, true);
+        if (docsAndPositions == null) {
+          docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, false);
+        }
         if (docsAndPositions != null) {
           return docsAndPositions;
         }

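For completeness, the new checkIndex overload in use -- the boolean simply forwards to CheckIndex.setCrossCheckTermVectors, so a test can skip the cross-check explicitly:

    // Sketch: verify the index but skip cross-checking term vectors.
    _TestUtil.checkIndex(dir, false);
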
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java Wed Jan 18 22:28:07 2012
@@ -74,7 +74,8 @@ public class TestCachingTokenFilter exte
     DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
                                                                           MultiFields.getLiveDocs(reader),
                                                                           "preanalyzed",
-                                                                          new BytesRef("term1"));
+                                                                          new BytesRef("term1"),
+                                                                          false);
     assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
     assertEquals(1, termPositions.freq());
     assertEquals(0, termPositions.nextPosition());
@@ -82,7 +83,8 @@ public class TestCachingTokenFilter exte
     termPositions = MultiFields.getTermPositionsEnum(reader,
                                                      MultiFields.getLiveDocs(reader),
                                                      "preanalyzed",
-                                                     new BytesRef("term2"));
+                                                     new BytesRef("term2"),
+                                                     false);
     assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
     assertEquals(2, termPositions.freq());
     assertEquals(1, termPositions.nextPosition());
@@ -91,7 +93,8 @@ public class TestCachingTokenFilter exte
     termPositions = MultiFields.getTermPositionsEnum(reader,
                                                      MultiFields.getLiveDocs(reader),
                                                      "preanalyzed",
-                                                     new BytesRef("term3"));
+                                                     new BytesRef("term3"),
+                                                     false);
     assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
     assertEquals(1, termPositions.freq());
     assertEquals(2, termPositions.nextPosition());

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/codecs/lucene40/TestDocValues.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/codecs/lucene40/TestDocValues.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/codecs/lucene40/TestDocValues.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/codecs/lucene40/TestDocValues.java Wed Jan 18 22:28:07 2012
@@ -18,17 +18,21 @@ package org.apache.lucene.codecs.lucene4
  */
 
 import java.io.IOException;
+import java.io.Reader;
 import java.util.Comparator;
 
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.lucene40.values.Bytes;
 import org.apache.lucene.codecs.lucene40.values.Floats;
 import org.apache.lucene.codecs.lucene40.values.Ints;
-import org.apache.lucene.index.DocValue;
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValues.SortedSource;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Counter;
@@ -175,9 +179,9 @@ public class TestDocValues extends Lucen
       Directory dir = newDirectory();
       final Counter trackBytes = Counter.newCounter();
       DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.VAR_INTS, newIOContext(random));
-      valueHolder.intValue = minMax[i][0];
+      valueHolder.numberValue = minMax[i][0];
       w.add(0, valueHolder);
-      valueHolder.intValue = minMax[i][1];
+      valueHolder.numberValue = minMax[i][1];
       w.add(1, valueHolder);
       w.finish(2);
       assertEquals(0, trackBytes.get());
@@ -212,7 +216,7 @@ public class TestDocValues extends Lucen
     final Counter trackBytes = Counter.newCounter();
     DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_8, newIOContext(random));
     for (int i = 0; i < sourceArray.length; i++) {
-      valueHolder.intValue = (long) sourceArray[i];
+      valueHolder.numberValue = (long) sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
@@ -235,7 +239,7 @@ public class TestDocValues extends Lucen
     final Counter trackBytes = Counter.newCounter();
     DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_16, newIOContext(random));
     for (int i = 0; i < sourceArray.length; i++) {
-      valueHolder.intValue = (long) sourceArray[i];
+      valueHolder.numberValue = (long) sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
@@ -258,7 +262,7 @@ public class TestDocValues extends Lucen
     final Counter trackBytes = Counter.newCounter();
     DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_64, newIOContext(random));
     for (int i = 0; i < sourceArray.length; i++) {
-      valueHolder.intValue = sourceArray[i];
+      valueHolder.numberValue = sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
@@ -281,7 +285,7 @@ public class TestDocValues extends Lucen
     final Counter trackBytes = Counter.newCounter();
     DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_32, newIOContext(random));
     for (int i = 0; i < sourceArray.length; i++) {
-      valueHolder.intValue = (long) sourceArray[i];
+      valueHolder.numberValue = (long) sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
@@ -304,7 +308,7 @@ public class TestDocValues extends Lucen
     final Counter trackBytes = Counter.newCounter();
     DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_32);
     for (int i = 0; i < sourceArray.length; i++) {
-      valueHolder.floatValue = sourceArray[i];
+      valueHolder.numberValue = sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
@@ -327,7 +331,7 @@ public class TestDocValues extends Lucen
     final Counter trackBytes = Counter.newCounter();
     DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_64);
     for (int i = 0; i < sourceArray.length; i++) {
-      valueHolder.floatValue = sourceArray[i];
+      valueHolder.numberValue = sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
@@ -354,7 +358,7 @@ public class TestDocValues extends Lucen
       DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, type, newIOContext(random));
       for (int i = 0; i < NUM_VALUES; i++) {
         final long v = random.nextLong() % (1 + maxV);
-        valueHolder.intValue = values[i] = v;
+        valueHolder.numberValue = values[i] = v;
         w.add(i, valueHolder);
       }
       final int additionalDocs = 1 + random.nextInt(9);
@@ -377,20 +381,20 @@ public class TestDocValues extends Lucen
   }
 
   public void testFloats4() throws IOException {
-    runTestFloats(Type.FLOAT_32, 0.00001);
+    runTestFloats(Type.FLOAT_32);
   }
 
-  private void runTestFloats(Type type, double delta) throws IOException {
+  private void runTestFloats(Type type) throws IOException {
     DocValueHolder valueHolder = new DocValueHolder();
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
     DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), type);
-    final int NUM_VALUES = 777 + random.nextInt(777);;
+    final int NUM_VALUES = 777 + random.nextInt(777);
     final double[] values = new double[NUM_VALUES];
     for (int i = 0; i < NUM_VALUES; i++) {
       final double v = type == Type.FLOAT_32 ? random.nextFloat() : random
           .nextDouble();
-      valueHolder.floatValue = values[i] = v;
+      valueHolder.numberValue = values[i] = v;
       w.add(i, valueHolder);
     }
     final int additionalValues = 1 + random.nextInt(10);
@@ -409,7 +413,7 @@ public class TestDocValues extends Lucen
   }
 
   public void testFloats8() throws IOException {
-    runTestFloats(Type.FLOAT_64, 0.0);
+    runTestFloats(Type.FLOAT_64);
   }
   
 
@@ -431,31 +435,49 @@ public class TestDocValues extends Lucen
     return getSource(values).asSortedSource();
   }
   
-  public static class DocValueHolder implements DocValue {
+  public static class DocValueHolder implements IndexableField {
     BytesRef bytes;
-    long intValue;
-    double floatValue;
+    Number numberValue;
     Comparator<BytesRef> comp;
+
+    @Override
+    public TokenStream tokenStream(Analyzer a) {
+      return null;
+    }
+
+    @Override
+    public float boost() {
+      return 0.0f;
+    }
+
+    @Override
+    public String name() {
+      return "test";
+    }
+
     @Override
-    public BytesRef getBytes() {
+    public BytesRef binaryValue() {
       return bytes;
     }
 
     @Override
-    public Comparator<BytesRef> bytesComparator() {
-      return comp;
+    public Number numericValue() {
+      return numberValue;
     }
 
     @Override
-    public double getFloat() {
-      return floatValue;
+    public String stringValue() {
+      return null;
     }
 
     @Override
-    public long getInt() {
-      return intValue;
+    public Reader readerValue() {
+      return null;
+    }
+
+    @Override
+    public IndexableFieldType fieldType() {
+      return null;
     }
-    
   }
-  
 }

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java Wed Jan 18 22:28:07 2012
@@ -23,7 +23,6 @@ import java.util.Map;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.nestedpulsing.NestedPulsingPostingsFormat;
-import org.apache.lucene.codecs.pulsing.Pulsing40PostingsFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
@@ -70,7 +69,7 @@ public class TestPulsingReuse extends Lu
     DocsAndPositionsEnum posReuse = null;
     te = segment.terms("foo").iterator(null);
     while (te.next() != null) {
-      posReuse = te.docsAndPositions(null, posReuse);
+      posReuse = te.docsAndPositions(null, posReuse, false);
       allEnums.put(posReuse, true);
     }
     
@@ -112,7 +111,7 @@ public class TestPulsingReuse extends Lu
     DocsAndPositionsEnum posReuse = null;
     te = segment.terms("foo").iterator(null);
     while (te.next() != null) {
-      posReuse = te.docsAndPositions(null, posReuse);
+      posReuse = te.docsAndPositions(null, posReuse, false);
       allEnums.put(posReuse, true);
     }
     

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java Wed Jan 18 22:28:07 2012
@@ -37,7 +37,7 @@ public class TestBinaryDocument extends 
   {
     FieldType ft = new FieldType();
     ft.setStored(true);
-    IndexableField binaryFldStored = new BinaryField("binaryStored", binaryValStored.getBytes());
+    IndexableField binaryFldStored = new StoredField("binaryStored", binaryValStored.getBytes());
     IndexableField stringFldStored = new Field("stringStored", binaryValStored, ft);
 
     Document doc = new Document();
@@ -75,8 +75,8 @@ public class TestBinaryDocument extends 
   }
   
   public void testCompressionTools() throws Exception {
-    IndexableField binaryFldCompressed = new BinaryField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()));
-    IndexableField stringFldCompressed = new BinaryField("stringCompressed", CompressionTools.compressString(binaryValCompressed));
+    IndexableField binaryFldCompressed = new StoredField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()));
+    IndexableField stringFldCompressed = new StoredField("stringCompressed", CompressionTools.compressString(binaryValCompressed));
     
     Document doc = new Document();
     

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/document/TestDocument.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/document/TestDocument.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/document/TestDocument.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/document/TestDocument.java Wed Jan 18 22:28:07 2012
@@ -1,17 +1,5 @@
 package org.apache.lucene.document;
 
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -29,6 +17,26 @@ import org.apache.lucene.util.LuceneTest
  * limitations under the License.
  */
 
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
+
 /**
  * Tests {@link Document} class.
  */
@@ -43,8 +51,8 @@ public class TestDocument extends Lucene
     FieldType ft = new FieldType();
     ft.setStored(true);
     IndexableField stringFld = new Field("string", binaryVal, ft);
-    IndexableField binaryFld = new BinaryField("binary", binaryVal.getBytes());
-    IndexableField binaryFld2 = new BinaryField("binary", binaryVal2.getBytes());
+    IndexableField binaryFld = new StoredField("binary", binaryVal.getBytes());
+    IndexableField binaryFld2 = new StoredField("binary", binaryVal2.getBytes());
     
     doc.add(stringFld);
     doc.add(binaryFld);
@@ -274,20 +282,82 @@ public class TestDocument extends Lucene
     assertEquals("did not see all IDs", 7, result);
   }
   
-  public void testFieldSetValueChangeBinary() {
-    Field field1 = new BinaryField("field1", new byte[0]);
-    Field field2 = new Field("field2", "", TextField.TYPE_STORED);
+  // LUCENE-3616
+  public void testInvalidFields() {
     try {
-      field1.setValue("abc");
-      fail("did not hit expected exception");
+      new Field("foo", new Tokenizer() {
+        @Override
+        public boolean incrementToken() {
+          return false;
+        }}, StringField.TYPE_STORED);
+      fail("did not hit expected exc");
     } catch (IllegalArgumentException iae) {
       // expected
     }
-    try {
-      field2.setValue(new byte[0]);
-      fail("did not hit expected exception");
-    } catch (IllegalArgumentException iae) {
-      // expected
+  }
+
+  // LUCENE-3682
+  public void testTransitionAPI() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, dir);
+
+    Document doc = new Document();
+    doc.add(new Field("stored", "abc", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field("stored_indexed", "abc xyz", Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(new Field("stored_tokenized", "abc xyz", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(new Field("indexed", "abc xyz", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    doc.add(new Field("tokenized", "abc xyz", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new Field("tokenized_reader", new StringReader("abc xyz")));
+    doc.add(new Field("tokenized_tokenstream", w.w.getAnalyzer().tokenStream("tokenized_tokenstream", new StringReader("abc xyz"))));
+    doc.add(new Field("binary", new byte[10]));
+    doc.add(new Field("tv", "abc xyz", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
+    doc.add(new Field("tv_pos", "abc xyz", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+    doc.add(new Field("tv_off", "abc xyz", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
+    doc.add(new Field("tv_pos_off", "abc xyz", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    w.addDocument(doc);
+    IndexReader r = w.getReader();
+    w.close();
+
+    doc = r.document(0);
+    // 4 stored fields
+    assertEquals(4, doc.getFields().size());
+    assertEquals("abc", doc.get("stored"));
+    assertEquals("abc xyz", doc.get("stored_indexed"));
+    assertEquals("abc xyz", doc.get("stored_tokenized"));
+    final BytesRef br = doc.getBinaryValue("binary");
+    assertNotNull(br);
+    assertEquals(10, br.length);
+
+    IndexSearcher s = new IndexSearcher(r);
+    assertEquals(1, s.search(new TermQuery(new Term("stored_indexed", "abc xyz")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("stored_tokenized", "abc")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("stored_tokenized", "xyz")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("indexed", "abc xyz")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("tokenized", "abc")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("tokenized", "xyz")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("tokenized_reader", "abc")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("tokenized_reader", "xyz")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("tokenized_tokenstream", "abc")), 1).totalHits);
+    assertEquals(1, s.search(new TermQuery(new Term("tokenized_tokenstream", "xyz")), 1).totalHits);
+
+    for(String field : new String[] {"tv", "tv_pos", "tv_off", "tv_pos_off"}) {
+      Fields tvFields = r.getTermVectors(0);
+      Terms tvs = tvFields.terms(field);
+      assertNotNull(tvs);
+      assertEquals(2, tvs.getUniqueTermCount());
+      TermsEnum tvsEnum = tvs.iterator(null);
+      assertEquals(new BytesRef("abc"), tvsEnum.next());
+      final DocsAndPositionsEnum dpEnum = tvsEnum.docsAndPositions(null, null, false);
+      if (field.equals("tv")) {
+        assertNull(dpEnum);
+      } else {
+        assertNotNull(dpEnum);
+      }
+      assertEquals(new BytesRef("xyz"), tvsEnum.next());
+      assertNull(tvsEnum.next());
     }
+
+    r.close();
+    dir.close();
   }
 }

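The TestDocument hunks above replace the removed BinaryField with StoredField and add a transition test that drives the 3.x-style Field constructors through the new API. As a minimal sketch (not part of the patch) of the stored-binary round trip the new assertions check, assuming a fresh RandomIndexWriter w over an empty Directory as in the test, and using only calls that appear in the hunks (the helper name is illustrative):

    // Sketch: store raw bytes without indexing them, then read them back as a BytesRef.
    static void storedBinaryRoundTrip(RandomIndexWriter w) throws Exception {
      Document doc = new Document();
      doc.add(new StoredField("binary", new byte[10]));   // stored only, never indexed
      w.addDocument(doc);
      IndexReader r = w.getReader();
      BytesRef br = r.document(0).getBinaryValue("binary");
      assertNotNull(br);
      assertEquals(10, br.length);                        // stored bytes come back as a BytesRef
      r.close();
    }
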
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Wed Jan 18 22:28:07 2012
@@ -1256,9 +1256,7 @@ public class TestAddIndexes extends Luce
     RandomIndexWriter w = new RandomIndexWriter(random, d1);
     Document doc = new Document();
     doc.add(newField("id", "1", StringField.TYPE_STORED));
-    DocValuesField dv = new DocValuesField("dv");
-    dv.setInt(1);
-    doc.add(dv);
+    doc.add(new DocValuesField("dv", 1, DocValues.Type.VAR_INTS));
     w.addDocument(doc);
     IndexReader r1 = w.getReader();
     w.close();
@@ -1267,9 +1265,7 @@ public class TestAddIndexes extends Luce
     w = new RandomIndexWriter(random, d2);
     doc = new Document();
     doc.add(newField("id", "2", StringField.TYPE_STORED));
-    dv = new DocValuesField("dv");
-    dv.setInt(2);
-    doc.add(dv);
+    doc.add(new DocValuesField("dv", 2, DocValues.Type.VAR_INTS));
     w.addDocument(doc);
     IndexReader r2 = w.getReader();
     w.close();

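The TestAddIndexes change tracks the new DocValuesField API: the value and its DocValues.Type are supplied at construction instead of through a setter. Restated as a before/after fragment using the hunk's own names:

    // before (removed):
    //   DocValuesField dv = new DocValuesField("dv");
    //   dv.setInt(1);
    //   doc.add(dv);
    // after (as committed): type and value are fixed up front
    doc.add(new DocValuesField("dv", 1, DocValues.Type.VAR_INTS));
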
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Wed Jan 18 22:28:07 2012
@@ -29,8 +29,6 @@ import java.util.Map;
 import java.util.Random;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.FieldInfosReader;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
@@ -45,9 +43,7 @@ import org.apache.lucene.search.IndexSea
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -544,8 +540,8 @@ public class TestBackwardsCompatibility 
     doc.add(new Field("content2", "here is more content with aaa aaa aaa", customType2));
     doc.add(new Field("fie\u2C77ld", "field with non-ascii name", customType2));
     // add numeric fields, to test if flex preserves encoding
-    doc.add(new NumericField("trieInt", 4).setIntValue(id));
-    doc.add(new NumericField("trieLong", 4).setLongValue(id));
+    doc.add(new NumericField("trieInt", id));
+    doc.add(new NumericField("trieLong", (long) id));
     writer.addDocument(doc);
   }
 
@@ -640,12 +636,12 @@ public class TestBackwardsCompatibility 
       assertEquals("wrong number of hits", 34, hits.length);
       
       // check decoding into field cache
-      int[] fci = FieldCache.DEFAULT.getInts(searcher.getIndexReader(), "trieInt", false);
+      int[] fci = FieldCache.DEFAULT.getInts(new SlowMultiReaderWrapper(searcher.getIndexReader()), "trieInt", false);
       for (int val : fci) {
         assertTrue("value in id bounds", val >= 0 && val < 35);
       }
       
-      long[] fcl = FieldCache.DEFAULT.getLongs(searcher.getIndexReader(), "trieLong", false);
+      long[] fcl = FieldCache.DEFAULT.getLongs(new SlowMultiReaderWrapper(searcher.getIndexReader()), "trieLong", false);
       for (long val : fcl) {
         assertTrue("value in id bounds", val >= 0L && val < 35L);
       }
@@ -738,5 +734,16 @@ public class TestBackwardsCompatibility 
       dir.close();
     }
   }
+  
+  public static final String surrogatesIndexName = "index.36.surrogates.zip";
+
+  public void testSurrogates() throws Exception {
+    File oldIndexDir = _TestUtil.getTempDir("surrogates");
+    _TestUtil.unzip(getDataFile(surrogatesIndexName), oldIndexDir);
+    Directory dir = newFSDirectory(oldIndexDir);
+    // TODO: more tests
+    _TestUtil.checkIndex(dir);
+    dir.close();
+  }
 
 }

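Two API shifts show up in TestBackwardsCompatibility: NumericField now takes its value in the constructor, and FieldCache lookups on a composite (multi-segment) reader go through SlowMultiReaderWrapper, presumably because the cache now expects an atomic, single-segment view. A fragment reusing the test's doc, id and searcher variables:

    doc.add(new NumericField("trieInt", id));           // int value at construction
    doc.add(new NumericField("trieLong", (long) id));   // long overload
    // wrap the top-level reader before handing it to FieldCache
    int[] fci = FieldCache.DEFAULT.getInts(
        new SlowMultiReaderWrapper(searcher.getIndexReader()), "trieInt", false);
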
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestCodecs.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestCodecs.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestCodecs.java Wed Jan 18 22:28:07 2012
@@ -166,7 +166,7 @@ public class TestCodecs extends LuceneTe
           totTF += positions[i].length;
           for(int j=0;j<positions[i].length;j++) {
             final PositionData pos = positions[i][j];
-            postingsConsumer.addPosition(pos.pos, pos.payload);
+            postingsConsumer.addPosition(pos.pos, pos.payload, -1, -1);
           }
           postingsConsumer.finishDoc();
         }
@@ -480,7 +480,7 @@ public class TestCodecs extends LuceneTe
         if (field.omitTF) {
           this.verifyDocs(term.docs, term.positions, _TestUtil.docs(random, termsEnum, null, null, false), false);
         } else {
-          this.verifyDocs(term.docs, term.positions, termsEnum.docsAndPositions(null, null), true);
+          this.verifyDocs(term.docs, term.positions, termsEnum.docsAndPositions(null, null, false), true);
         }
 
         // Test random seek by ord:
@@ -500,7 +500,7 @@ public class TestCodecs extends LuceneTe
           if (field.omitTF) {
             this.verifyDocs(term.docs, term.positions, _TestUtil.docs(random, termsEnum, null, null, false), false);
           } else {
-            this.verifyDocs(term.docs, term.positions, termsEnum.docsAndPositions(null, null), true);
+            this.verifyDocs(term.docs, term.positions, termsEnum.docsAndPositions(null, null, false), true);
           }
         }
 
@@ -552,7 +552,7 @@ public class TestCodecs extends LuceneTe
             final DocsEnum docsAndFreqs;
             final DocsAndPositionsEnum postings;
             if (!field.omitTF) {
-              postings = termsEnum.docsAndPositions(null, null);
+              postings = termsEnum.docsAndPositions(null, null, false);
               if (postings != null) {
                 docs = docsAndFreqs = postings;
               } else {

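The TestCodecs hunks follow the extended postings API: addPosition carries two extra arguments (read here as start/end offsets, with -1/-1 meaning none recorded), and docsAndPositions gains a trailing boolean (read as "offsets needed"). That reading is inferred from the values being passed, not stated in the patch. Condensed, with the hunk's own names:

    postingsConsumer.addPosition(pos.pos, pos.payload, -1, -1);   // no offsets recorded
    DocsAndPositionsEnum postings =
        termsEnum.docsAndPositions(null, null, false);            // offsets not needed
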
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java Wed Jan 18 22:28:07 2012
@@ -20,10 +20,10 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.BinaryField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
@@ -189,7 +189,7 @@ public class TestConsistentFieldNumbers 
                 : NoMergePolicy.COMPOUND_FILES));
         Document d = new Document();
         d.add(new Field("f1", "d2 first field", TextField.TYPE_STORED));
-        d.add(new BinaryField("f3", new byte[] { 1, 2, 3 }));
+        d.add(new StoredField("f3", new byte[] { 1, 2, 3 }));
         writer.addDocument(d);
         writer.close();
         SegmentInfos sis = new SegmentInfos();
@@ -212,7 +212,7 @@ public class TestConsistentFieldNumbers 
         Document d = new Document();
         d.add(new Field("f1", "d3 first field", TextField.TYPE_STORED));
         d.add(new Field("f2", "d3 second field", TextField.TYPE_STORED));
-        d.add(new BinaryField("f3", new byte[] { 1, 2, 3, 4, 5 }));
+        d.add(new StoredField("f3", new byte[] { 1, 2, 3, 4, 5 }));
         writer.addDocument(d);
         writer.close();
         SegmentInfos sis = new SegmentInfos();
@@ -296,8 +296,6 @@ public class TestConsistentFieldNumbers 
         Field expected = getField(Integer.parseInt(fi.name));
         assertEquals(expected.fieldType().indexed(), fi.isIndexed);
         assertEquals(expected.fieldType().storeTermVectors(), fi.storeTermVector);
-        assertEquals(expected.fieldType().storeTermVectorPositions(), fi.storePositionWithTermVector);
-        assertEquals(expected.fieldType().storeTermVectorOffsets(), fi.storeOffsetWithTermVector);
       }
     }
 

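TestConsistentFieldNumbers picks up the same BinaryField-to-StoredField rename as TestDocument, together with the matching import swap; the dropped assertions apparently follow from FieldInfo no longer tracking separate term-vector position/offset flags. The field construction, isolated:

    // import: org.apache.lucene.document.StoredField (replacing ...document.BinaryField)
    d.add(new StoredField("f3", new byte[] { 1, 2, 3 }));   // stored-only binary payload
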
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDoc.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDoc.java Wed Jan 18 22:28:07 2012
@@ -234,7 +234,7 @@ public class TestDoc extends LuceneTestC
           out.print("  term=" + field + ":" + tis.term());
           out.println("    DF=" + tis.docFreq());
 
-          DocsAndPositionsEnum positions = tis.docsAndPositions(reader.getLiveDocs(), null);
+          DocsAndPositionsEnum positions = tis.docsAndPositions(reader.getLiveDocs(), null, false);
 
           while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
             out.print(" doc=" + positions.docID());

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java Wed Jan 18 22:28:07 2012
@@ -124,8 +124,7 @@ public class TestDocTermOrds extends Luc
     for(int id=0;id<NUM_DOCS;id++) {
       Document doc = new Document();
 
-      NumericField idField = new NumericField("id");
-      doc.add(idField.setIntValue(id));
+      doc.add(new NumericField("id", id));
       
       final int termCount = _TestUtil.nextInt(random, 0, 20*RANDOM_MULTIPLIER);
       while(ordsForDocSet.size() < termCount) {
@@ -169,7 +168,7 @@ public class TestDocTermOrds extends Luc
     if (VERBOSE) {
       System.out.println("TEST: top reader");
     }
-    verify(r, idToOrds, termsArray, null);
+    verify(new SlowMultiReaderWrapper(r), idToOrds, termsArray, null);
 
     FieldCache.DEFAULT.purge(r);
 
@@ -221,8 +220,7 @@ public class TestDocTermOrds extends Luc
     for(int id=0;id<NUM_DOCS;id++) {
       Document doc = new Document();
 
-      NumericField idField = new NumericField("id");
-      doc.add(idField.setIntValue(id));
+      doc.add(new NumericField("id", id));
       
       final int termCount = _TestUtil.nextInt(random, 0, 20*RANDOM_MULTIPLIER);
       while(ordsForDocSet.size() < termCount) {
@@ -287,7 +285,7 @@ public class TestDocTermOrds extends Luc
       if (VERBOSE) {
         System.out.println("TEST: top reader");
       }
-      verify(r, idToOrdsPrefix, termsArray, prefixRef);
+      verify(new SlowMultiReaderWrapper(r), idToOrdsPrefix, termsArray, prefixRef);
     }
 
     FieldCache.DEFAULT.purge(r);

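TestDocTermOrds applies the same two conversions seen above: NumericField gets its value at construction, and the top-level composite reader is wrapped in SlowMultiReaderWrapper before the per-field verification. Isolated from the hunks:

    doc.add(new NumericField("id", id));                                // value in the constructor
    verify(new SlowMultiReaderWrapper(r), idToOrds, termsArray, null);  // atomic view of the top reader
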
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocValuesIndexing.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocValuesIndexing.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocValuesIndexing.java Wed Jan 18 22:28:07 2012
@@ -21,10 +21,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -37,17 +35,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.DocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValues.SortedSource;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.LogDocMergePolicy;
-import org.apache.lucene.index.LogMergePolicy;
-import org.apache.lucene.index.MultiDocValues;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.search.*;
@@ -85,9 +73,7 @@ public class TestDocValuesIndexing exten
     IndexWriter writer = new IndexWriter(dir, writerConfig(false));
     for (int i = 0; i < 5; i++) {
       Document doc = new Document();
-      DocValuesField valuesField = new DocValuesField("docId");
-      valuesField.setInt(i);
-      doc.add(valuesField);
+      doc.add(new DocValuesField("docId", i, DocValues.Type.VAR_INTS));
       doc.add(new TextField("docId", "" + i));
       writer.addDocument(doc);
     }
@@ -576,17 +562,47 @@ public class TestDocValuesIndexing exten
       Type.FLOAT_32,
       Type.FLOAT_64);
 
-  private FixedBitSet indexValues(IndexWriter w, int numValues, Type value,
+  private FixedBitSet indexValues(IndexWriter w, int numValues, Type valueType,
       List<Type> valueVarList, boolean withDeletions, int bytesSize)
       throws CorruptIndexException, IOException {
-    final boolean isNumeric = NUMERICS.contains(value);
+    final boolean isNumeric = NUMERICS.contains(valueType);
     FixedBitSet deleted = new FixedBitSet(numValues);
     Document doc = new Document();
-    DocValuesField valField = new DocValuesField(value.name());
+    final DocValuesField valField;
+    if (isNumeric) {
+      switch (valueType) {
+      case VAR_INTS:
+        valField = new DocValuesField(valueType.name(), (long) 0, valueType);
+        break;
+      case FIXED_INTS_16:
+        valField = new DocValuesField(valueType.name(), (short) 0, valueType);
+        break;
+      case FIXED_INTS_32:
+        valField = new DocValuesField(valueType.name(), 0, valueType);
+        break;
+      case FIXED_INTS_64:
+        valField = new DocValuesField(valueType.name(), (long) 0, valueType);
+        break;
+      case FIXED_INTS_8:
+        valField = new DocValuesField(valueType.name(), (byte) 0, valueType);
+        break;
+      case FLOAT_32:
+        valField = new DocValuesField(valueType.name(), (float) 0, valueType);
+        break;
+      case FLOAT_64:
+        valField = new DocValuesField(valueType.name(), (double) 0, valueType);
+        break;
+      default:
+        valField = null;
+        fail("unhandled case");
+      }
+    } else {
+      valField = new DocValuesField(valueType.name(), new BytesRef(), valueType);
+    }
     doc.add(valField);
     final BytesRef bytesRef = new BytesRef();
 
-    final String idBase = value.name() + "_";
+    final String idBase = valueType.name() + "_";
     final byte[] b = new byte[bytesSize];
     if (bytesRef != null) {
       bytesRef.bytes = b;
@@ -596,38 +612,37 @@ public class TestDocValuesIndexing exten
     byte upto = 0;
     for (int i = 0; i < numValues; i++) {
       if (isNumeric) {
-        switch (value) {
+        switch (valueType) {
         case VAR_INTS:
-          valField.setInt((long)i);
+          valField.setValue((long)i);
           break;
         case FIXED_INTS_16:
-          valField.setInt((short)i, random.nextInt(10) != 0);
+          valField.setValue((short)i);
           break;
         case FIXED_INTS_32:
-          valField.setInt(i, random.nextInt(10) != 0);
+          valField.setValue(i);
           break;
         case FIXED_INTS_64:
-          valField.setInt((long)i, random.nextInt(10) != 0);
+          valField.setValue((long)i);
           break;
         case FIXED_INTS_8:
-          valField.setInt((byte)(0xFF & (i % 128)), random.nextInt(10) != 0);
+          valField.setValue((byte)(0xFF & (i % 128)));
           break;
         case FLOAT_32:
-          valField.setFloat(2.0f * i);
+          valField.setValue(2.0f * i);
           break;
         case FLOAT_64:
-          valField.setFloat(2.0d * i);
+          valField.setValue(2.0d * i);
           break;
-       
         default:
-          fail("unexpected value " + value);
+          fail("unexpected value " + valueType);
         }
       } else {
         for (int j = 0; j < b.length; j++) {
           b[j] = upto++;
         }
         if (bytesRef != null) {
-          valField.setBytes(bytesRef, value);
+          valField.setValue(bytesRef);
         }
       }
       doc.removeFields("id");
@@ -637,11 +652,11 @@ public class TestDocValuesIndexing exten
       if (i % 7 == 0) {
         if (withDeletions && random.nextBoolean()) {
           Type val = valueVarList.get(random.nextInt(1 + valueVarList
-              .indexOf(value)));
-          final int randInt = val == value ? random.nextInt(1 + i) : random
+              .indexOf(valueType)));
+          final int randInt = val == valueType ? random.nextInt(1 + i) : random
               .nextInt(numValues);
           w.deleteDocuments(new Term("id", val.name() + "_" + randInt));
-          if (val == value) {
+          if (val == valueType) {
             deleted.set(randInt);
           }
         }
@@ -663,8 +678,7 @@ public class TestDocValuesIndexing exten
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random, d);
     Document doc = new Document();
-    DocValuesField f = new DocValuesField("field");
-    f.setInt(17);
+    DocValuesField f = new DocValuesField("field", 17, Type.VAR_INTS);
     // Index doc values are single-valued so we should not
     // be able to add same field more than once:
     doc.add(f);
@@ -691,14 +705,11 @@ public class TestDocValuesIndexing exten
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random, d);
     Document doc = new Document();
-    DocValuesField f = new DocValuesField("field");
-    f.setInt(17);
     // Index doc values are single-valued so we should not
     // be able to add same field more than once:
-    doc.add(f);
-    DocValuesField f2 = new DocValuesField("field");
-    f2.setFloat(22.0);
-    doc.add(f2);
+    Field f;
+    doc.add(f = new DocValuesField("field", 17, Type.VAR_INTS));
+    doc.add(new DocValuesField("field", 22.0, Type.FLOAT_32));
     try {
       w.addDocument(doc);
       fail("didn't hit expected exception");
@@ -725,7 +736,6 @@ public class TestDocValuesIndexing exten
       IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT,
           new MockAnalyzer(random));
       IndexWriter w = new IndexWriter(d, cfg);
-      Comparator<BytesRef> comp = BytesRef.getUTF8SortedAsUnicodeComparator();
       int numDocs = atLeast(100);
       BytesRefHash hash = new BytesRefHash();
       Map<String, String> docToString = new HashMap<String, String>();
@@ -733,14 +743,12 @@ public class TestDocValuesIndexing exten
       for (int i = 0; i < numDocs; i++) {
         Document doc = new Document();
         doc.add(newField("id", "" + i, TextField.TYPE_STORED));
-        DocValuesField f = new DocValuesField("field");
         String string =fixed ? _TestUtil.randomFixedByteLengthUnicodeString(random,
             len) : _TestUtil.randomRealisticUnicodeString(random, 1, len);
-        hash.add(new BytesRef(string));
+        BytesRef br = new BytesRef(string);
+        doc.add(new DocValuesField("field", br, type));
+        hash.add(br);
         docToString.put("" + i, string);
-
-        f.setBytes(new BytesRef(string), type, comp);
-        doc.add(f);
         w.addDocument(doc);
       }
       if (rarely()) {
@@ -763,13 +771,12 @@ public class TestDocValuesIndexing exten
         Document doc = new Document();
         String id = "" + i + numDocs;
         doc.add(newField("id", id, TextField.TYPE_STORED));
-        DocValuesField f = new DocValuesField("field");
         String string = fixed ? _TestUtil.randomFixedByteLengthUnicodeString(random,
             len) : _TestUtil.randomRealisticUnicodeString(random, 1, len);
-        hash.add(new BytesRef(string));
+        BytesRef br = new BytesRef(string);
+        hash.add(br);
         docToString.put(id, string);
-        f.setBytes(new BytesRef(string), type, comp);
-        doc.add(f);
+        doc.add(new DocValuesField("field", br, type));
         w.addDocument(doc);
       }
       w.commit();
@@ -777,7 +784,7 @@ public class TestDocValuesIndexing exten
       DocValues docValues = MultiDocValues.getDocValues(reader, "field");
       Source source = getSource(docValues);
       SortedSource asSortedSource = source.asSortedSource();
-      int[] sort = hash.sort(comp);
+      int[] sort = hash.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
       BytesRef expected = new BytesRef();
       BytesRef actual = new BytesRef();
       assertEquals(hash.size(), asSortedSource.getValueCount());

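The TestDocValuesIndexing rewrite shows the full DocValuesField surface after the change: the DocValues.Type is chosen at construction together with a typed initial value, and per-document updates go through overloaded setValue(...) calls rather than setInt/setFloat/setBytes. Condensed from the hunks:

    // construction pins the DocValues.Type and takes a typed initial value
    DocValuesField valField = new DocValuesField("field", 17, Type.VAR_INTS);
    // updates per document: one setValue overload per value kind
    valField.setValue((long) i);             // VAR_INTS / FIXED_INTS_64
    valField.setValue(2.0f * i);             // FLOAT_32
    valField.setValue(new BytesRef("abc"));  // bytes-based types
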
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java Wed Jan 18 22:28:07 2012
@@ -96,7 +96,7 @@ public class TestDocsAndPositions extend
 
   public DocsAndPositionsEnum getDocsAndPositions(IndexReader reader,
       BytesRef bytes, Bits liveDocs) throws IOException {
-      return reader.termPositionsEnum(null, fieldName, bytes);
+    return reader.termPositionsEnum(null, fieldName, bytes, false);
   }
 
   /**
@@ -358,7 +358,7 @@ public class TestDocsAndPositions extend
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     IndexReader r = getOnlySegmentReader(reader);
-    DocsAndPositionsEnum disi = r.termPositionsEnum(null, "foo", new BytesRef("bar"));
+    DocsAndPositionsEnum disi = r.termPositionsEnum(null, "foo", new BytesRef("bar"), false);
     int docid = disi.docID();
     assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
     assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -366,7 +366,7 @@ public class TestDocsAndPositions extend
     // now reuse and check again
     TermsEnum te = r.terms("foo").iterator(null);
     assertTrue(te.seekExact(new BytesRef("bar"), true));
-    disi = te.docsAndPositions(null, disi);
+    disi = te.docsAndPositions(null, disi, false);
     docid = disi.docID();
     assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
     assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

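TestDocsAndPositions adopts the extended positions lookup: both the reader-level termPositionsEnum(...) helper and TermsEnum.docsAndPositions(...) take a trailing boolean, passed as false throughout these hunks (read as "offsets not needed", matching the TestCodecs change above; again an inference, not stated in the patch). Fragment with the test's names:

    DocsAndPositionsEnum disi = r.termPositionsEnum(null, "foo", new BytesRef("bar"), false);
    // the enum can be reused on a later lookup
    disi = te.docsAndPositions(null, disi, false);
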
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java?rev=1233096&r1=1233095&r2=1233096&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java Wed Jan 18 22:28:07 2012
@@ -94,7 +94,7 @@ public class TestDocumentWriter extends 
 
     // test that the norms are not present in the segment if
     // omitNorms is true
-    for (FieldInfo fi : reader.fieldInfos()) {
+    for (FieldInfo fi : reader.getFieldInfos()) {
       if (fi.isIndexed) {
         assertTrue(fi.omitNorms == !reader.hasNorms(fi.name));
       }
@@ -128,7 +128,7 @@ public class TestDocumentWriter extends 
     SegmentReader reader = new SegmentReader(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
 
     DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
-                                                                          "repeated", new BytesRef("repeated"));
+                                                                          "repeated", new BytesRef("repeated"), false);
     assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
     int freq = termPositions.freq();
     assertEquals(2, freq);
@@ -199,7 +199,7 @@ public class TestDocumentWriter extends 
     writer.close();
     SegmentReader reader = new SegmentReader(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
 
-    DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
+    DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"), false);
     assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
     int freq = termPositions.freq();
     assertEquals(3, freq);
@@ -243,18 +243,18 @@ public class TestDocumentWriter extends 
     writer.close();
     SegmentReader reader = new SegmentReader(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
 
-    DocsAndPositionsEnum termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term1"));
+    DocsAndPositionsEnum termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term1"), false);
     assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
     assertEquals(1, termPositions.freq());
     assertEquals(0, termPositions.nextPosition());
 
-    termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term2"));
+    termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term2"), false);
     assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
     assertEquals(2, termPositions.freq());
     assertEquals(1, termPositions.nextPosition());
     assertEquals(3, termPositions.nextPosition());
     
-    termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term3"));
+    termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term3"), false);
     assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
     assertEquals(1, termPositions.freq());
     assertEquals(2, termPositions.nextPosition());
@@ -327,7 +327,7 @@ public class TestDocumentWriter extends 
     _TestUtil.checkIndex(dir);
 
     SegmentReader reader = getOnlySegmentReader(IndexReader.open(dir));
-    FieldInfos fi = reader.fieldInfos();
+    FieldInfos fi = reader.getFieldInfos();
     // f1
     assertFalse("f1 should have no norms", reader.hasNorms("f1"));
     assertEquals("omitTermFreqAndPositions field bit should not be set for f1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, fi.fieldInfo("f1").indexOptions);