Posted to commits@lucene.apache.org by rm...@apache.org on 2014/10/24 04:43:45 UTC

svn commit: r1633991 [2/4] - in /lucene/dev/trunk: ./ dev-tools/ lucene/ lucene/analysis/ lucene/analysis/common/ lucene/backward-codecs/ lucene/backward-codecs/src/java/org/apache/lucene/codecs/blocktree/ lucene/backward-codecs/src/java/org/apache/luc...

Modified: lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java Fri Oct 24 02:43:41 2014
@@ -90,7 +90,7 @@ import org.apache.lucene.util.fst.Util;
  *  <li>TermFST --&gt; {@link FST FST&lt;TermData&gt;}</li>
  *  <li>TermData --&gt; Flag, BytesSize?, LongDelta<sup>LongsSize</sup>?, Byte<sup>BytesSize</sup>?, 
  *                      &lt; DocFreq[Same?], (TotalTermFreq-DocFreq) &gt; ? </li>
- *  <li>Header --&gt; {@link CodecUtil#writeSegmentHeader SegmentHeader}</li>
+ *  <li>Header --&gt; {@link CodecUtil#writeIndexHeader IndexHeader}</li>
  *  <li>DirOffset --&gt; {@link DataOutput#writeLong Uint64}</li>
  *  <li>DocFreq, LongsSize, BytesSize, NumFields,
  *        FieldNumber, DocCount --&gt; {@link DataOutput#writeVInt VInt}</li>
@@ -142,10 +142,10 @@ public class FSTTermsWriter extends Fiel
 
     boolean success = false;
     try {
-      CodecUtil.writeSegmentHeader(out, TERMS_CODEC_NAME, TERMS_VERSION_CURRENT,
+      CodecUtil.writeIndexHeader(out, TERMS_CODEC_NAME, TERMS_VERSION_CURRENT,
                                         state.segmentInfo.getId(), state.segmentSuffix);   
 
-      this.postingsWriter.init(out); 
+      this.postingsWriter.init(out, state); 
       success = true;
     } finally {
       if (!success) {

Modified: lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesConsumer.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesConsumer.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesConsumer.java Fri Oct 24 02:43:41 2014
@@ -74,10 +74,10 @@ class MemoryDocValuesConsumer extends Do
     try {
       String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
       data = state.directory.createOutput(dataName, state.context);
-      CodecUtil.writeSegmentHeader(data, dataCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
+      CodecUtil.writeIndexHeader(data, dataCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
       String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
       meta = state.directory.createOutput(metaName, state.context);
-      CodecUtil.writeSegmentHeader(meta, metaCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
+      CodecUtil.writeIndexHeader(meta, metaCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
       success = true;
     } finally {
       if (!success) {

Modified: lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java Fri Oct 24 02:43:41 2014
@@ -146,7 +146,7 @@ class MemoryDocValuesProducer extends Do
     ChecksumIndexInput in = state.directory.openChecksumInput(metaName, state.context);
     boolean success = false;
     try {
-      version = CodecUtil.checkSegmentHeader(in, metaCodec, VERSION_START, VERSION_CURRENT,
+      version = CodecUtil.checkIndexHeader(in, metaCodec, VERSION_START, VERSION_CURRENT,
                                                  state.segmentInfo.getId(), state.segmentSuffix);
       numEntries = readFields(in, state.fieldInfos);
       CodecUtil.checkFooter(in);
@@ -164,7 +164,7 @@ class MemoryDocValuesProducer extends Do
     this.data = state.directory.openInput(dataName, state.context);
     success = false;
     try {
-      final int version2 = CodecUtil.checkSegmentHeader(data, dataCodec, VERSION_START, VERSION_CURRENT,
+      final int version2 = CodecUtil.checkIndexHeader(data, dataCodec, VERSION_START, VERSION_CURRENT,
                                                               state.segmentInfo.getId(), state.segmentSuffix);
       if (version != version2) {
         throw new CorruptIndexException("Format versions mismatch: meta=" + version + ", data=" + version2, data);

Modified: lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java Fri Oct 24 02:43:41 2014
@@ -288,7 +288,7 @@ public final class MemoryPostingsFormat 
       out = state.directory.createOutput(fileName, state.context);
       boolean success = false;
       try {
-        CodecUtil.writeSegmentHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
+        CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
         success = true;
       } finally {
         if (!success) {
@@ -981,7 +981,7 @@ public final class MemoryPostingsFormat 
     try (ChecksumIndexInput in = state.directory.openChecksumInput(fileName, IOContext.READONCE)) {
       Throwable priorE = null;
       try {
-        CodecUtil.checkSegmentHeader(in, CODEC_NAME, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
+        CodecUtil.checkIndexHeader(in, CODEC_NAME, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
         while(true) {
           final int termCount = in.readVInt();
           if (termCount == 0) {

Modified: lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoFormat.java Fri Oct 24 02:43:41 2014
@@ -60,7 +60,7 @@ public class SimpleTextSegmentInfoFormat
   public static final String SI_EXTENSION = "si";
   
   @Override
-  public SegmentInfo read(Directory directory, String segmentName, IOContext context) throws IOException {
+  public SegmentInfo read(Directory directory, String segmentName, byte[] segmentID, IOContext context) throws IOException {
     BytesRefBuilder scratch = new BytesRefBuilder();
     String segFileName = IndexFileNames.segmentFileName(segmentName, "", SimpleTextSegmentInfoFormat.SI_EXTENSION);
     ChecksumIndexInput input = directory.openChecksumInput(segFileName, context);
@@ -114,6 +114,11 @@ public class SimpleTextSegmentInfoFormat
       SimpleTextUtil.readLine(input, scratch);
       assert StringHelper.startsWith(scratch.get(), SI_ID);
       final byte[] id = Arrays.copyOfRange(scratch.bytes(), SI_ID.length, scratch.length());
+      
+      if (!Arrays.equals(segmentID, id)) {
+        throw new CorruptIndexException("file mismatch, expected: " + StringHelper.idToString(segmentID)
+                                                        + ", got: " + StringHelper.idToString(id), input);
+      }
 
       SimpleTextUtil.checkFooter(input);
 

Modified: lucene/dev/trunk/lucene/codecs/src/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat (original)
+++ lucene/dev/trunk/lucene/codecs/src/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat Fri Oct 24 02:43:41 2014
@@ -13,7 +13,7 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-org.apache.lucene.codecs.blocktreeords.Ords41PostingsFormat
+org.apache.lucene.codecs.blocktreeords.BlockTreeOrdsPostingsFormat
 org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat
 org.apache.lucene.codecs.memory.DirectPostingsFormat
 org.apache.lucene.codecs.memory.FSTOrdPostingsFormat

Modified: lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestFixedGapPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestFixedGapPostingsFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestFixedGapPostingsFormat.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestFixedGapPostingsFormat.java Fri Oct 24 02:43:41 2014
@@ -18,7 +18,6 @@ package org.apache.lucene.codecs.blockte
  */
 
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.lucene41ords.Lucene41WithOrds;
 import org.apache.lucene.index.BasePostingsFormatTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -26,7 +25,7 @@ import org.apache.lucene.util.TestUtil;
  * Basic tests of a PF using FixedGap terms dictionary
  */
 public class TestFixedGapPostingsFormat extends BasePostingsFormatTestCase {
-  private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41WithOrds(TestUtil.nextInt(random(), 1, 1000)));
+  private final Codec codec = TestUtil.alwaysPostingsFormat(new LuceneFixedGap(TestUtil.nextInt(random(), 1, 1000)));
 
   @Override
   protected Codec getCodec() {

Modified: lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapDocFreqIntervalPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapDocFreqIntervalPostingsFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapDocFreqIntervalPostingsFormat.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapDocFreqIntervalPostingsFormat.java Fri Oct 24 02:43:41 2014
@@ -18,7 +18,7 @@ package org.apache.lucene.codecs.blockte
  */
 
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.lucene41vargap.Lucene41VarGapFixedInterval;
+import org.apache.lucene.codecs.blockterms.LuceneVarGapFixedInterval;
 import org.apache.lucene.index.BasePostingsFormatTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -26,7 +26,7 @@ import org.apache.lucene.util.TestUtil;
  * Basic tests of a PF using VariableGap terms dictionary (fixed interval)
  */
 public class TestVarGapDocFreqIntervalPostingsFormat extends BasePostingsFormatTestCase {
-  private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41VarGapFixedInterval(TestUtil.nextInt(random(), 1, 1000)));
+  private final Codec codec = TestUtil.alwaysPostingsFormat(new LuceneVarGapFixedInterval(TestUtil.nextInt(random(), 1, 1000)));
 
   @Override
   protected Codec getCodec() {

Modified: lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapFixedIntervalPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapFixedIntervalPostingsFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapFixedIntervalPostingsFormat.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blockterms/TestVarGapFixedIntervalPostingsFormat.java Fri Oct 24 02:43:41 2014
@@ -18,7 +18,7 @@ package org.apache.lucene.codecs.blockte
  */
 
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.lucene41vargap.Lucene41VarGapDocFreqInterval;
+import org.apache.lucene.codecs.blockterms.LuceneVarGapDocFreqInterval;
 import org.apache.lucene.index.BasePostingsFormatTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -26,7 +26,7 @@ import org.apache.lucene.util.TestUtil;
  * Basic tests of a PF using VariableGap terms dictionary (fixed interval, docFreq threshold)
  */
 public class TestVarGapFixedIntervalPostingsFormat extends BasePostingsFormatTestCase {
-  private final Codec codec = TestUtil.alwaysPostingsFormat(new Lucene41VarGapDocFreqInterval(TestUtil.nextInt(random(), 1, 100), TestUtil.nextInt(random(), 1, 1000)));
+  private final Codec codec = TestUtil.alwaysPostingsFormat(new LuceneVarGapDocFreqInterval(TestUtil.nextInt(random(), 1, 100), TestUtil.nextInt(random(), 1, 1000)));
 
   @Override
   protected Codec getCodec() {

Modified: lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java (original)
+++ lucene/dev/trunk/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java Fri Oct 24 02:43:41 2014
@@ -39,7 +39,7 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.TestUtil;
 
 public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
-  private final Codec codec = TestUtil.alwaysPostingsFormat(new Ords41PostingsFormat());
+  private final Codec codec = TestUtil.alwaysPostingsFormat(new BlockTreeOrdsPostingsFormat());
 
   @Override
   protected Codec getCodec() {

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java Fri Oct 24 02:43:41 2014
@@ -94,46 +94,48 @@ public final class CodecUtil {
   }
   
   /**
-   * Writes a codec header for a per-segment, which records both a string to
-   * identify the file, a version number, and the unique ID of the segment. 
+   * Writes a codec header for an index file, which records both a string to
+   * identify the format of the file, a version number, and data to identify
+   * the file instance (ID and auxiliary suffix such as generation).
+   * <p>
    * This header can be parsed and validated with 
-   * {@link #checkSegmentHeader(DataInput, String, int, int, byte[], String) checkSegmentHeader()}.
+   * {@link #checkIndexHeader(DataInput, String, int, int, byte[], String) checkIndexHeader()}.
    * <p>
-   * CodecSegmentHeader --&gt; CodecHeader,SegmentID,SegmentSuffix
+   * IndexHeader --&gt; CodecHeader,ObjectID,ObjectSuffix
    * <ul>
    *    <li>CodecHeader   --&gt; {@link #writeHeader}
-   *    <li>SegmentID     --&gt; {@link DataOutput#writeByte byte}<sup>16</sup>
-   *    <li>SegmentSuffix --&gt; SuffixLength,SuffixBytes
+   *    <li>ObjectID     --&gt; {@link DataOutput#writeByte byte}<sup>16</sup>
+   *    <li>ObjectSuffix --&gt; SuffixLength,SuffixBytes
    *    <li>SuffixLength  --&gt; {@link DataOutput#writeByte byte}
    *    <li>SuffixBytes   --&gt; {@link DataOutput#writeByte byte}<sup>SuffixLength</sup>
    * </ul>
    * <p>
-   * Note that the length of a segment header depends only upon the
+   * Note that the length of an index header depends only upon the
    * name of the codec and suffix, so this length can be computed at any time
-   * with {@link #segmentHeaderLength(String,String)}.
+   * with {@link #indexHeaderLength(String,String)}.
    * 
    * @param out Output stream
-   * @param codec String to identify this file. It should be simple ASCII, 
+   * @param codec String to identify the format of this file. It should be simple ASCII, 
    *              less than 128 characters in length.
-   * @param segmentID Unique identifier for the segment
-   * @param segmentSuffix auxiliary suffix for the file. It should be simple ASCII,
+   * @param id Unique identifier for this particular file instance.
+   * @param suffix auxiliary suffix information for the file. It should be simple ASCII,
    *              less than 256 characters in length.
    * @param version Version number
    * @throws IOException If there is an I/O error writing to the underlying medium.
    * @throws IllegalArgumentException If the codec name is not simple ASCII, or 
-   *         is more than 127 characters in length, or if segmentID is invalid,
-   *         or if the segmentSuffix is not simple ASCII, or more than 255 characters
+   *         is more than 127 characters in length, or if id is invalid,
+   *         or if the suffix is not simple ASCII, or more than 255 characters
    *         in length.
    */
-  public static void writeSegmentHeader(DataOutput out, String codec, int version, byte[] segmentID, String segmentSuffix) throws IOException {
-    if (segmentID.length != StringHelper.ID_LENGTH) {
-      throw new IllegalArgumentException("Invalid id: " + StringHelper.idToString(segmentID));
+  public static void writeIndexHeader(DataOutput out, String codec, int version, byte[] id, String suffix) throws IOException {
+    if (id.length != StringHelper.ID_LENGTH) {
+      throw new IllegalArgumentException("Invalid id: " + StringHelper.idToString(id));
     }
     writeHeader(out, codec, version);
-    out.writeBytes(segmentID, 0, segmentID.length);
-    BytesRef suffixBytes = new BytesRef(segmentSuffix);
-    if (suffixBytes.length != segmentSuffix.length() || suffixBytes.length >= 256) {
-      throw new IllegalArgumentException("codec must be simple ASCII, less than 256 characters in length [got " + segmentSuffix + "]");
+    out.writeBytes(id, 0, id.length);
+    BytesRef suffixBytes = new BytesRef(suffix);
+    if (suffixBytes.length != suffix.length() || suffixBytes.length >= 256) {
+      throw new IllegalArgumentException("codec must be simple ASCII, less than 256 characters in length [got " + suffix + "]");
     }
     out.writeByte((byte)suffixBytes.length);
     out.writeBytes(suffixBytes.bytes, suffixBytes.offset, suffixBytes.length);
@@ -151,14 +153,14 @@ public final class CodecUtil {
   }
   
   /**
-   * Computes the length of a segment header.
+   * Computes the length of an index header.
    * 
    * @param codec Codec name.
-   * @return length of the entire segment header.
-   * @see #writeSegmentHeader(DataOutput, String, int, byte[], String)
+   * @return length of the entire index header.
+   * @see #writeIndexHeader(DataOutput, String, int, byte[], String)
    */
-  public static int segmentHeaderLength(String codec, String segmentSuffix) {
-    return headerLength(codec) + StringHelper.ID_LENGTH + 1 + segmentSuffix.length();
+  public static int indexHeaderLength(String codec, String suffix) {
+    return headerLength(codec) + StringHelper.ID_LENGTH + 1 + suffix.length();
   }
 
   /**
@@ -220,11 +222,11 @@ public final class CodecUtil {
   
   /**
    * Reads and validates a header previously written with 
-   * {@link #writeSegmentHeader(DataOutput, String, int, byte[], String)}.
+   * {@link #writeIndexHeader(DataOutput, String, int, byte[], String)}.
    * <p>
    * When reading a file, supply the expected <code>codec</code>,
    * expected version range (<code>minVersion to maxVersion</code>),
-   * and segment ID.
+   * and object ID and suffix.
    * 
    * @param in Input stream, positioned at the point where the
    *        header was previously written. Typically this is located
@@ -232,41 +234,53 @@ public final class CodecUtil {
    * @param codec The expected codec name.
    * @param minVersion The minimum supported expected version number.
    * @param maxVersion The maximum supported expected version number.
-   * @param segmentID The expected segment this file belongs to.
-   * @param segmentSuffix The expected auxiliary segment suffix for this file.
+   * @param expectedID The expected object identifier for this file.
+   * @param expectedSuffix The expected auxiliary suffix for this file.
    * @return The actual version found, when a valid header is found 
    *         that matches <code>codec</code>, with an actual version 
    *         where <code>minVersion <= actual <= maxVersion</code>, 
-   *         and matching <code>segmentID</code>
+   *         and matching <code>expectedID</code> and <code>expectedSuffix</code>.
    *         Otherwise an exception is thrown.
    * @throws CorruptIndexException If the first four bytes are not
    *         {@link #CODEC_MAGIC}, or if the actual codec found is
-   *         not <code>codec</code>, or if the <code>segmentID</code>
-   *         or <code>segmentSuffix</code> do not match.
+   *         not <code>codec</code>, or if the <code>expectedID</code>
+   *         or <code>expectedSuffix</code> do not match.
    * @throws IndexFormatTooOldException If the actual version is less 
    *         than <code>minVersion</code>.
    * @throws IndexFormatTooNewException If the actual version is greater 
    *         than <code>maxVersion</code>.
    * @throws IOException If there is an I/O error reading from the underlying medium.
-   * @see #writeSegmentHeader(DataOutput, String, int, byte[],String)
+   * @see #writeIndexHeader(DataOutput, String, int, byte[],String)
    */
-  public static int checkSegmentHeader(DataInput in, String codec, int minVersion, int maxVersion, byte[] segmentID, String segmentSuffix) throws IOException {
+  public static int checkIndexHeader(DataInput in, String codec, int minVersion, int maxVersion, byte[] expectedID, String expectedSuffix) throws IOException {
     int version = checkHeader(in, codec, minVersion, maxVersion);
+    checkIndexHeaderID(in, expectedID);
+    checkIndexHeaderSuffix(in, expectedSuffix);
+    return version;
+  }
+  
+  /** Expert: just reads and verifies the object ID of an index header */
+  public static byte[] checkIndexHeaderID(DataInput in, byte[] expectedID) throws IOException {
     byte id[] = new byte[StringHelper.ID_LENGTH];
     in.readBytes(id, 0, id.length);
-    if (!Arrays.equals(id, segmentID)) {
-      throw new CorruptIndexException("file mismatch, expected segment id=" + StringHelper.idToString(segmentID) 
-                                                                 + ", got=" + StringHelper.idToString(id), in);
+    if (!Arrays.equals(id, expectedID)) {
+      throw new CorruptIndexException("file mismatch, expected id=" + StringHelper.idToString(expectedID) 
+                                                         + ", got=" + StringHelper.idToString(id), in);
     }
+    return id;
+  }
+  
+  /** Expert: just reads and verifies the suffix of an index header */
+  public static String checkIndexHeaderSuffix(DataInput in, String expectedSuffix) throws IOException {
     int suffixLength = in.readByte() & 0xFF;
     byte suffixBytes[] = new byte[suffixLength];
     in.readBytes(suffixBytes, 0, suffixBytes.length);
     String suffix = new String(suffixBytes, 0, suffixBytes.length, StandardCharsets.UTF_8);
-    if (!suffix.equals(segmentSuffix)) {
-      throw new CorruptIndexException("file mismatch, expected segment suffix=" + segmentSuffix
-                                                                     + ", got=" + suffix, in);
+    if (!suffix.equals(expectedSuffix)) {
+      throw new CorruptIndexException("file mismatch, expected suffix=" + expectedSuffix
+                                                             + ", got=" + suffix, in);
     }
-    return version;
+    return suffix;
   }
   
   /**

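For context, the renamed methods pair up as a write/check round trip. Below is a
minimal sketch of the new API (the codec name, suffix, and streams are hypothetical;
the signatures follow the diff above, and StringHelper.randomId() is assumed as the
usual way to mint a 16-byte ID):

    // Writing: stamp the file with format name, version, file ID and suffix.
    byte[] id = StringHelper.randomId();   // 16 bytes (StringHelper.ID_LENGTH)
    CodecUtil.writeIndexHeader(out, "MyFormatData", 1, id, "sfx");

    // Header length is a pure function of codec name and suffix, so it can
    // be computed without reading the file:
    int headerLen = CodecUtil.indexHeaderLength("MyFormatData", "sfx");

    // Reading: validates magic, codec name, version range, ID and suffix in
    // one call; any mismatch throws CorruptIndexException (or the
    // too-old/too-new variants for out-of-range versions).
    int version = CodecUtil.checkIndexHeader(in, "MyFormatData", 1, 1, id, "sfx");
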
Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java Fri Oct 24 02:43:41 2014
@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
@@ -53,7 +54,7 @@ public abstract class PostingsReaderBase
   /** Performs any initialization, such as reading and
    *  verifying the header from the provided terms
    *  dictionary {@link IndexInput}. */
-  public abstract void init(IndexInput termsIn) throws IOException;
+  public abstract void init(IndexInput termsIn, SegmentReadState state) throws IOException;
 
   /** Return a newly created empty TermState */
   public abstract BlockTermState newTermState() throws IOException;

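With the extra SegmentReadState argument, a postings reader can now validate its
portion of the terms dictionary against the owning segment. A hedged sketch of what
an implementation might do (the codec name and version constants are hypothetical):

    @Override
    public void init(IndexInput termsIn, SegmentReadState state) throws IOException {
      // Check the header the matching writer stamped into the terms dictionary.
      CodecUtil.checkIndexHeader(termsIn, "MyPostings", VERSION_START, VERSION_CURRENT,
                                 state.segmentInfo.getId(), state.segmentSuffix);
    }
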
Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java Fri Oct 24 02:43:41 2014
@@ -24,6 +24,7 @@ import org.apache.lucene.codecs.blocktre
 import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
 import org.apache.lucene.index.DocsEnum; // javadocs
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.SegmentWriteState;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.IndexOutput;
@@ -50,7 +51,7 @@ public abstract class PostingsWriterBase
   /** Called once after startup, before any terms have been
    *  added.  Implementations typically write a header to
    *  the provided {@code termsOut}. */
-  public abstract void init(IndexOutput termsOut) throws IOException;
+  public abstract void init(IndexOutput termsOut, SegmentWriteState state) throws IOException;
 
   /** Write all postings for one term; use the provided
    *  {@link TermsEnum} to pull a {@link DocsEnum} or {@link

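The writer side mirrors this: implementations now receive the SegmentWriteState so
the header they write can carry the segment ID and suffix, as FSTTermsWriter and
BlockTreeTermsWriter do elsewhere in this commit. A hypothetical implementation:

    @Override
    public void init(IndexOutput termsOut, SegmentWriteState state) throws IOException {
      CodecUtil.writeIndexHeader(termsOut, "MyPostings", VERSION_CURRENT,
                                 state.segmentInfo.getId(), state.segmentSuffix);
    }
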
Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java Fri Oct 24 02:43:41 2014
@@ -24,8 +24,6 @@ import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 
@@ -72,11 +70,6 @@ public abstract class PushPostingsWriter
   protected PushPostingsWriterBase() {
   }
 
-  /** Called once after startup, before any terms have been
-   *  added.  Implementations typically write a header to
-   *  the provided {@code termsOut}. */
-  public abstract void init(IndexOutput termsOut) throws IOException;
-
   /** Return a newly created empty TermState */
   public abstract BlockTermState newTermState() throws IOException;
 
@@ -90,26 +83,11 @@ public abstract class PushPostingsWriter
    *  and will holds metadata from PBF when returned */
   public abstract void finishTerm(BlockTermState state) throws IOException;
 
-  /**
-   * Encode metadata as long[] and byte[]. {@code absolute} controls whether 
-   * current term is delta encoded according to latest term. 
-   * Usually elements in {@code longs} are file pointers, so each one always 
-   * increases when a new term is consumed. {@code out} is used to write generic
-   * bytes, which are not monotonic.
-   *
-   * NOTE: sometimes long[] might contain "don't care" values that are unused, e.g. 
-   * the pointer to postings list may not be defined for some terms but is defined
-   * for others, if it is designed to inline  some postings data in term dictionary.
-   * In this case, the postings writer should always use the last value, so that each
-   * element in metadata long[] remains monotonic.
-   */
-  public abstract void encodeTerm(long[] longs, DataOutput out, FieldInfo fieldInfo, BlockTermState state, boolean absolute) throws IOException;
-
   /** 
    * Sets the current field for writing, and returns the
    * fixed length of long[] metadata (which is fixed per
    * field), called when the writing switches to another field. */
-  // TODO: better name?
+  @Override
   public int setField(FieldInfo fieldInfo) {
     this.fieldInfo = fieldInfo;
     indexOptions = fieldInfo.getIndexOptions();

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/SegmentInfoFormat.java Fri Oct 24 02:43:41 2014
@@ -41,10 +41,11 @@ public abstract class SegmentInfoFormat 
    * Read {@link SegmentInfo} data from a directory.
    * @param directory directory to read from
    * @param segmentName name of the segment to read
+   * @param segmentID expected identifier for the segment
    * @return infos instance to be populated with data
    * @throws IOException If an I/O error occurs
    */
-  public abstract SegmentInfo read(Directory directory, String segmentName, IOContext context) throws IOException;
+  public abstract SegmentInfo read(Directory directory, String segmentName, byte segmentID[], IOContext context) throws IOException;
 
   /**
    * Write {@link SegmentInfo} data. 

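Callers now pass the ID they expect, so a stale or foreign .si file fails fast
instead of being silently read. A caller-side sketch (dir, segmentName and
expectedID are assumed to come from SegmentInfos):

    SegmentInfo si = codec.segmentInfoFormat().read(dir, segmentName, expectedID, IOContext.READ);
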
Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java Fri Oct 24 02:43:41 2014
@@ -30,17 +30,16 @@ import org.apache.lucene.codecs.Postings
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.fst.ByteSequenceOutputs;
+import org.apache.lucene.util.fst.Outputs;
 
 /** A block-based terms index and dictionary that assigns
  *  terms to variable length blocks according to how they
@@ -74,8 +73,31 @@ import org.apache.lucene.util.IOUtils;
 
 public final class BlockTreeTermsReader extends FieldsProducer {
 
+  static final Outputs<BytesRef> FST_OUTPUTS = ByteSequenceOutputs.getSingleton();
+  
+  static final BytesRef NO_OUTPUT = FST_OUTPUTS.getNoOutput();
+
+  static final int OUTPUT_FLAGS_NUM_BITS = 2;
+  static final int OUTPUT_FLAGS_MASK = 0x3;
+  static final int OUTPUT_FLAG_IS_FLOOR = 0x1;
+  static final int OUTPUT_FLAG_HAS_TERMS = 0x2;
+
+  /** Extension of terms file */
+  static final String TERMS_EXTENSION = "tim";
+  final static String TERMS_CODEC_NAME = "BlockTreeTermsDict";
+
+  /** Initial terms format. */
+  public static final int VERSION_START = 0;
+
+  /** Current terms format. */
+  public static final int VERSION_CURRENT = VERSION_START;
+
+  /** Extension of terms index file */
+  static final String TERMS_INDEX_EXTENSION = "tip";
+  final static String TERMS_INDEX_CODEC_NAME = "BlockTreeTermsIndex";
+
   // Open input to the main terms dict file (_X.tib)
-  final IndexInput in;
+  final IndexInput termsIn;
 
   //private static final boolean DEBUG = BlockTreeTermsWriter.DEBUG;
 
@@ -96,105 +118,86 @@ public final class BlockTreeTermsReader 
   private final int version;
 
   /** Sole constructor. */
-  public BlockTreeTermsReader(Directory dir, FieldInfos fieldInfos, SegmentInfo info,
-                              PostingsReaderBase postingsReader, IOContext ioContext,
-                              String segmentSuffix)
-    throws IOException {
-    
-    this.postingsReader = postingsReader;
-
-    this.segment = info.name;
-    in = dir.openInput(IndexFileNames.segmentFileName(segment, segmentSuffix, BlockTreeTermsWriter.TERMS_EXTENSION),
-                       ioContext);
-
+  public BlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state) throws IOException {
     boolean success = false;
     IndexInput indexIn = null;
-
+    
+    this.postingsReader = postingsReader;
+    this.segment = state.segmentInfo.name;
+    
+    String termsName = IndexFileNames.segmentFileName(segment, state.segmentSuffix, TERMS_EXTENSION);
     try {
-      version = readHeader(in);
-      indexIn = dir.openInput(IndexFileNames.segmentFileName(segment, segmentSuffix, BlockTreeTermsWriter.TERMS_INDEX_EXTENSION),
-                                ioContext);
-      int indexVersion = readIndexHeader(indexIn);
-      if (indexVersion != version) {
-        throw new CorruptIndexException("mixmatched version files: " + in + "=" + version + "," + indexIn + "=" + indexVersion, indexIn);
-      }
+      termsIn = state.directory.openInput(termsName, state.context);
+      version = CodecUtil.checkIndexHeader(termsIn, TERMS_CODEC_NAME, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
       
-      // verify
-      if (version >= BlockTreeTermsWriter.VERSION_CHECKSUM) {
-        CodecUtil.checksumEntireFile(indexIn);
-      }
+      String indexName = IndexFileNames.segmentFileName(segment, state.segmentSuffix, TERMS_INDEX_EXTENSION);
+      indexIn = state.directory.openInput(indexName, state.context);
+      CodecUtil.checkIndexHeader(indexIn, TERMS_INDEX_CODEC_NAME, version, version, state.segmentInfo.getId(), state.segmentSuffix);
+      CodecUtil.checksumEntireFile(indexIn);
 
       // Have PostingsReader init itself
-      postingsReader.init(in);
-      
+      postingsReader.init(termsIn, state);
       
       // NOTE: data file is too costly to verify checksum against all the bytes on open,
       // but for now we at least verify proper structure of the checksum footer: which looks
       // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
       // such as file truncation.
-      if (version >= BlockTreeTermsWriter.VERSION_CHECKSUM) {
-        CodecUtil.retrieveChecksum(in);
-      }
+      CodecUtil.retrieveChecksum(termsIn);
 
       // Read per-field details
-      seekDir(in, dirOffset);
+      seekDir(termsIn, dirOffset);
       seekDir(indexIn, indexDirOffset);
 
-      final int numFields = in.readVInt();
+      final int numFields = termsIn.readVInt();
       if (numFields < 0) {
-        throw new CorruptIndexException("invalid numFields: " + numFields, in);
+        throw new CorruptIndexException("invalid numFields: " + numFields, termsIn);
       }
 
-      for(int i=0;i<numFields;i++) {
-        final int field = in.readVInt();
-        final long numTerms = in.readVLong();
+      for (int i = 0; i < numFields; ++i) {
+        final int field = termsIn.readVInt();
+        final long numTerms = termsIn.readVLong();
         if (numTerms <= 0) {
-          throw new CorruptIndexException("Illegal numTerms for field number: " + field, in);
+          throw new CorruptIndexException("Illegal numTerms for field number: " + field, termsIn);
         }
-        final int numBytes = in.readVInt();
+        final int numBytes = termsIn.readVInt();
         if (numBytes < 0) {
-          throw new CorruptIndexException("invalid rootCode for field number: " + field + ", numBytes=" + numBytes, in);
+          throw new CorruptIndexException("invalid rootCode for field number: " + field + ", numBytes=" + numBytes, termsIn);
         }
         final BytesRef rootCode = new BytesRef(new byte[numBytes]);
-        in.readBytes(rootCode.bytes, 0, numBytes);
+        termsIn.readBytes(rootCode.bytes, 0, numBytes);
         rootCode.length = numBytes;
-        final FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
+        final FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
         if (fieldInfo == null) {
-          throw new CorruptIndexException("invalid field number: " + field, in);
+          throw new CorruptIndexException("invalid field number: " + field, termsIn);
         }
-        final long sumTotalTermFreq = fieldInfo.getIndexOptions() == IndexOptions.DOCS_ONLY ? -1 : in.readVLong();
-        final long sumDocFreq = in.readVLong();
-        final int docCount = in.readVInt();
-        final int longsSize = version >= BlockTreeTermsWriter.VERSION_META_ARRAY ? in.readVInt() : 0;
+        final long sumTotalTermFreq = fieldInfo.getIndexOptions() == IndexOptions.DOCS_ONLY ? -1 : termsIn.readVLong();
+        final long sumDocFreq = termsIn.readVLong();
+        final int docCount = termsIn.readVInt();
+        final int longsSize = termsIn.readVInt();
         if (longsSize < 0) {
-          throw new CorruptIndexException("invalid longsSize for field: " + fieldInfo.name + ", longsSize=" + longsSize, in);
+          throw new CorruptIndexException("invalid longsSize for field: " + fieldInfo.name + ", longsSize=" + longsSize, termsIn);
         }
-        BytesRef minTerm, maxTerm;
-        if (version >= BlockTreeTermsWriter.VERSION_MIN_MAX_TERMS) {
-          minTerm = readBytesRef(in);
-          maxTerm = readBytesRef(in);
-        } else {
-          minTerm = maxTerm = null;
-        }
-        if (docCount < 0 || docCount > info.getDocCount()) { // #docs with field must be <= #docs
-          throw new CorruptIndexException("invalid docCount: " + docCount + " maxDoc: " + info.getDocCount(), in);
+        BytesRef minTerm = readBytesRef(termsIn);
+        BytesRef maxTerm = readBytesRef(termsIn);
+        if (docCount < 0 || docCount > state.segmentInfo.getDocCount()) { // #docs with field must be <= #docs
+          throw new CorruptIndexException("invalid docCount: " + docCount + " maxDoc: " + state.segmentInfo.getDocCount(), termsIn);
         }
         if (sumDocFreq < docCount) {  // #postings must be >= #docs with field
-          throw new CorruptIndexException("invalid sumDocFreq: " + sumDocFreq + " docCount: " + docCount, in);
+          throw new CorruptIndexException("invalid sumDocFreq: " + sumDocFreq + " docCount: " + docCount, termsIn);
         }
         if (sumTotalTermFreq != -1 && sumTotalTermFreq < sumDocFreq) { // #positions must be >= #postings
-          throw new CorruptIndexException("invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq, in);
+          throw new CorruptIndexException("invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq, termsIn);
         }
         final long indexStartFP = indexIn.readVLong();
         FieldReader previous = fields.put(fieldInfo.name,       
                                           new FieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq, docCount,
                                                           indexStartFP, longsSize, indexIn, minTerm, maxTerm));
         if (previous != null) {
-          throw new CorruptIndexException("duplicate field: " + fieldInfo.name, in);
+          throw new CorruptIndexException("duplicate field: " + fieldInfo.name, termsIn);
         }
       }
+      
       indexIn.close();
-
       success = true;
     } finally {
       if (!success) {
@@ -212,38 +215,11 @@ public final class BlockTreeTermsReader 
     return bytes;
   }
 
-  /** Reads terms file header. */
-  private int readHeader(IndexInput input) throws IOException {
-    int version = CodecUtil.checkHeader(input, BlockTreeTermsWriter.TERMS_CODEC_NAME,
-                          BlockTreeTermsWriter.VERSION_START,
-                          BlockTreeTermsWriter.VERSION_CURRENT);
-    if (version < BlockTreeTermsWriter.VERSION_APPEND_ONLY) {
-      dirOffset = input.readLong();
-    }
-    return version;
-  }
-
-  /** Reads index file header. */
-  private int readIndexHeader(IndexInput input) throws IOException {
-    int version = CodecUtil.checkHeader(input, BlockTreeTermsWriter.TERMS_INDEX_CODEC_NAME,
-                          BlockTreeTermsWriter.VERSION_START,
-                          BlockTreeTermsWriter.VERSION_CURRENT);
-    if (version < BlockTreeTermsWriter.VERSION_APPEND_ONLY) {
-      indexDirOffset = input.readLong(); 
-    }
-    return version;
-  }
-
   /** Seek {@code input} to the directory offset. */
   private void seekDir(IndexInput input, long dirOffset)
       throws IOException {
-    if (version >= BlockTreeTermsWriter.VERSION_CHECKSUM) {
-      input.seek(input.length() - CodecUtil.footerLength() - 8);
-      dirOffset = input.readLong();
-    } else if (version >= BlockTreeTermsWriter.VERSION_APPEND_ONLY) {
-      input.seek(input.length() - 8);
-      dirOffset = input.readLong();
-    }
+    input.seek(input.length() - CodecUtil.footerLength() - 8);
+    dirOffset = input.readLong();
     input.seek(dirOffset);
   }
 
@@ -255,7 +231,7 @@ public final class BlockTreeTermsReader 
   @Override
   public void close() throws IOException {
     try {
-      IOUtils.close(in, postingsReader);
+      IOUtils.close(termsIn, postingsReader);
     } finally { 
       // Clear so refs to terms index is GCable even if
       // app hangs onto us:
@@ -313,14 +289,12 @@ public final class BlockTreeTermsReader 
   }
 
   @Override
-  public void checkIntegrity() throws IOException {
-    if (version >= BlockTreeTermsWriter.VERSION_CHECKSUM) {      
-      // term dictionary
-      CodecUtil.checksumEntireFile(in);
+  public void checkIntegrity() throws IOException { 
+    // term dictionary
+    CodecUtil.checksumEntireFile(termsIn);
       
-      // postings
-      postingsReader.checkIntegrity();
-    }
+    // postings
+    postingsReader.checkIntegrity();
   }
 
   @Override

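Since the reader now pulls the directory, field infos, segment info and suffix from
the SegmentReadState, wiring it up from a PostingsFormat shrinks to a single
constructor call. A minimal sketch (createPostingsReader is a hypothetical helper
for whichever PostingsReaderBase the format uses):

    @Override
    public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
      PostingsReaderBase postingsReader = createPostingsReader(state);
      boolean success = false;
      try {
        FieldsProducer ret = new BlockTreeTermsReader(postingsReader, state);
        success = true;
        return ret;
      } finally {
        if (!success) {
          IOUtils.closeWhileHandlingException(postingsReader);
        }
      }
    }
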
Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java Fri Oct 24 02:43:41 2014
@@ -41,14 +41,12 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.IntsRefBuilder;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.fst.Builder;
 import org.apache.lucene.util.fst.ByteSequenceOutputs;
 import org.apache.lucene.util.fst.BytesRefFSTEnum;
 import org.apache.lucene.util.fst.FST;
-import org.apache.lucene.util.fst.Outputs;
 import org.apache.lucene.util.fst.Util;
 import org.apache.lucene.util.packed.PackedInts;
 
@@ -192,10 +190,6 @@ import org.apache.lucene.util.packed.Pac
  */
 public final class BlockTreeTermsWriter extends FieldsConsumer {
 
-  static final Outputs<BytesRef> FST_OUTPUTS = ByteSequenceOutputs.getSingleton();
-
-  static final BytesRef NO_OUTPUT = FST_OUTPUTS.getNoOutput();
-
   /** Suggested default value for the {@code
    *  minItemsInBlock} parameter to {@link
    *  #BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}. */
@@ -209,38 +203,7 @@ public final class BlockTreeTermsWriter 
   // public final static boolean DEBUG = false;
   //private final static boolean SAVE_DOT_FILES = false;
 
-  static final int OUTPUT_FLAGS_NUM_BITS = 2;
-  static final int OUTPUT_FLAGS_MASK = 0x3;
-  static final int OUTPUT_FLAG_IS_FLOOR = 0x1;
-  static final int OUTPUT_FLAG_HAS_TERMS = 0x2;
-
-  /** Extension of terms file */
-  static final String TERMS_EXTENSION = "tim";
-  final static String TERMS_CODEC_NAME = "BLOCK_TREE_TERMS_DICT";
-
-  /** Initial terms format. */
-  public static final int VERSION_START = 0;
-  
-  /** Append-only */
-  public static final int VERSION_APPEND_ONLY = 1;
-
-  /** Meta data as array */
-  public static final int VERSION_META_ARRAY = 2;
-  
-  /** checksums */
-  public static final int VERSION_CHECKSUM = 3;
-
-  /** min/max term */
-  public static final int VERSION_MIN_MAX_TERMS = 4;
-
-  /** Current terms format. */
-  public static final int VERSION_CURRENT = VERSION_MIN_MAX_TERMS;
-
-  /** Extension of terms index file */
-  static final String TERMS_INDEX_EXTENSION = "tip";
-  final static String TERMS_INDEX_CODEC_NAME = "BLOCK_TREE_TERMS_INDEX";
-
-  private final IndexOutput out;
+  private final IndexOutput termsOut;
   private final IndexOutput indexOut;
   final int maxDoc;
   final int minItemsInBlock;
@@ -286,8 +249,7 @@ public final class BlockTreeTermsWriter 
    *  sub-blocks) per block will aim to be between
    *  minItemsPerBlock and maxItemsPerBlock, though in some
    *  cases the blocks may be smaller than the min. */
-  public BlockTreeTermsWriter(
-                              SegmentWriteState state,
+  public BlockTreeTermsWriter(SegmentWriteState state,
                               PostingsWriterBase postingsWriter,
                               int minItemsInBlock,
                               int maxItemsInBlock)
@@ -306,47 +268,34 @@ public final class BlockTreeTermsWriter 
       throw new IllegalArgumentException("maxItemsInBlock must be at least 2*(minItemsInBlock-1); got maxItemsInBlock=" + maxItemsInBlock + " minItemsInBlock=" + minItemsInBlock);
     }
 
-    maxDoc = state.segmentInfo.getDocCount();
+    this.maxDoc = state.segmentInfo.getDocCount();
+    this.fieldInfos = state.fieldInfos;
+    this.minItemsInBlock = minItemsInBlock;
+    this.maxItemsInBlock = maxItemsInBlock;
+    this.postingsWriter = postingsWriter;
 
-    final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
-    out = state.directory.createOutput(termsFileName, state.context);
+    final String termsName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, BlockTreeTermsReader.TERMS_EXTENSION);
+    termsOut = state.directory.createOutput(termsName, state.context);
     boolean success = false;
     IndexOutput indexOut = null;
     try {
-      fieldInfos = state.fieldInfos;
-      this.minItemsInBlock = minItemsInBlock;
-      this.maxItemsInBlock = maxItemsInBlock;
-      writeHeader(out);
-
-      //DEBUG = state.segmentName.equals("_4a");
-
-      final String termsIndexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
-      indexOut = state.directory.createOutput(termsIndexFileName, state.context);
-      writeIndexHeader(indexOut);
+      CodecUtil.writeIndexHeader(termsOut, BlockTreeTermsReader.TERMS_CODEC_NAME, BlockTreeTermsReader.VERSION_CURRENT,
+                                  state.segmentInfo.getId(), state.segmentSuffix);
 
-      this.postingsWriter = postingsWriter;
-      // segment = state.segmentInfo.name;
+      final String indexName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, BlockTreeTermsReader.TERMS_INDEX_EXTENSION);
+      indexOut = state.directory.createOutput(indexName, state.context);
+      CodecUtil.writeIndexHeader(indexOut, BlockTreeTermsReader.TERMS_INDEX_CODEC_NAME, BlockTreeTermsReader.VERSION_CURRENT,
+                                   state.segmentInfo.getId(), state.segmentSuffix);
 
-      // System.out.println("BTW.init seg=" + state.segmentName);
-
-      postingsWriter.init(out);                          // have consumer write its format/header
+      postingsWriter.init(termsOut, state);                          // have consumer write its format/header
+      
+      this.indexOut = indexOut;
       success = true;
     } finally {
       if (!success) {
-        IOUtils.closeWhileHandlingException(out, indexOut);
+        IOUtils.closeWhileHandlingException(termsOut, indexOut);
       }
     }
-    this.indexOut = indexOut;
-  }
-
-  /** Writes the terms file header. */
-  private void writeHeader(IndexOutput out) throws IOException {
-    CodecUtil.writeHeader(out, TERMS_CODEC_NAME, VERSION_CURRENT);   
-  }
-
-  /** Writes the index file header. */
-  private void writeIndexHeader(IndexOutput out) throws IOException {
-    CodecUtil.writeHeader(out, TERMS_INDEX_CODEC_NAME, VERSION_CURRENT); 
   }
 
   /** Writes the terms file trailer. */
@@ -389,7 +338,7 @@ public final class BlockTreeTermsWriter 
   
   static long encodeOutput(long fp, boolean hasTerms, boolean isFloor) {
     assert fp < (1L << 62);
-    return (fp << 2) | (hasTerms ? OUTPUT_FLAG_HAS_TERMS : 0) | (isFloor ? OUTPUT_FLAG_IS_FLOOR : 0);
+    return (fp << 2) | (hasTerms ? BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS : 0) | (isFloor ? BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR : 0);
   }
 
   private static class PendingEntry {
@@ -686,7 +635,7 @@ public final class BlockTreeTermsWriter 
 
       assert end > start;
 
-      long startFP = out.getFilePointer();
+      long startFP = termsOut.getFilePointer();
 
       boolean hasFloorLeadLabel = isFloor && floorLeadLabel != -1;
 
@@ -701,7 +650,7 @@ public final class BlockTreeTermsWriter 
         // Last block:
         code |= 1;
       }
-      out.writeVInt(code);
+      termsOut.writeVInt(code);
 
       /*
       if (DEBUG) {
@@ -847,18 +796,18 @@ public final class BlockTreeTermsWriter 
       // search on lookup
 
       // Write suffixes byte[] blob to terms dict output:
-      out.writeVInt((int) (suffixWriter.getFilePointer() << 1) | (isLeafBlock ? 1:0));
-      suffixWriter.writeTo(out);
+      termsOut.writeVInt((int) (suffixWriter.getFilePointer() << 1) | (isLeafBlock ? 1:0));
+      suffixWriter.writeTo(termsOut);
       suffixWriter.reset();
 
       // Write term stats byte[] blob
-      out.writeVInt((int) statsWriter.getFilePointer());
-      statsWriter.writeTo(out);
+      termsOut.writeVInt((int) statsWriter.getFilePointer());
+      statsWriter.writeTo(termsOut);
       statsWriter.reset();
 
       // Write term meta data byte[] blob
-      out.writeVInt((int) metaWriter.getFilePointer());
-      metaWriter.writeTo(out);
+      termsOut.writeVInt((int) metaWriter.getFilePointer());
+      metaWriter.writeTo(termsOut);
       metaWriter.reset();
 
       // if (DEBUG) {
@@ -1013,38 +962,38 @@ public final class BlockTreeTermsWriter 
     boolean success = false;
     try {
       
-      final long dirStart = out.getFilePointer();
+      final long dirStart = termsOut.getFilePointer();
       final long indexDirStart = indexOut.getFilePointer();
 
-      out.writeVInt(fields.size());
+      termsOut.writeVInt(fields.size());
       
       for(FieldMetaData field : fields) {
         //System.out.println("  field " + field.fieldInfo.name + " " + field.numTerms + " terms");
-        out.writeVInt(field.fieldInfo.number);
+        termsOut.writeVInt(field.fieldInfo.number);
         assert field.numTerms > 0;
-        out.writeVLong(field.numTerms);
-        out.writeVInt(field.rootCode.length);
-        out.writeBytes(field.rootCode.bytes, field.rootCode.offset, field.rootCode.length);
+        termsOut.writeVLong(field.numTerms);
+        termsOut.writeVInt(field.rootCode.length);
+        termsOut.writeBytes(field.rootCode.bytes, field.rootCode.offset, field.rootCode.length);
         if (field.fieldInfo.getIndexOptions() != IndexOptions.DOCS_ONLY) {
-          out.writeVLong(field.sumTotalTermFreq);
+          termsOut.writeVLong(field.sumTotalTermFreq);
         }
-        out.writeVLong(field.sumDocFreq);
-        out.writeVInt(field.docCount);
-        out.writeVInt(field.longsSize);
+        termsOut.writeVLong(field.sumDocFreq);
+        termsOut.writeVInt(field.docCount);
+        termsOut.writeVInt(field.longsSize);
         indexOut.writeVLong(field.indexStartFP);
-        writeBytesRef(out, field.minTerm);
-        writeBytesRef(out, field.maxTerm);
+        writeBytesRef(termsOut, field.minTerm);
+        writeBytesRef(termsOut, field.maxTerm);
       }
-      writeTrailer(out, dirStart);
-      CodecUtil.writeFooter(out);
+      writeTrailer(termsOut, dirStart);
+      CodecUtil.writeFooter(termsOut);
       writeIndexTrailer(indexOut, indexDirStart);
       CodecUtil.writeFooter(indexOut);
       success = true;
     } finally {
       if (success) {
-        IOUtils.close(out, indexOut, postingsWriter);
+        IOUtils.close(termsOut, indexOut, postingsWriter);
       } else {
-        IOUtils.closeWhileHandlingException(out, indexOut, postingsWriter);
+        IOUtils.closeWhileHandlingException(termsOut, indexOut, postingsWriter);
       }
     }
   }
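
An aside on the renamed output in the hunks above: each of the suffix, stats, and metadata blobs is length-prefixed with a vInt, and the suffix blob's vInt additionally packs a leaf-block flag into its low bit (writeVInt((int) (suffixWriter.getFilePointer() << 1) | (isLeafBlock ? 1:0))). A minimal decoding sketch for the read side follows; the class and variable names are illustrative, not copied from BlockTreeTermsReader:

  import java.io.IOException;

  import org.apache.lucene.store.IndexInput;

  final class SuffixBlobDecoder {
    // Undoes the writer's packing: low bit = leaf-block flag,
    // remaining bits = length of the suffix byte[] blob.
    static byte[] readSuffixBlob(IndexInput termsIn) throws IOException {
      int code = termsIn.readVInt();
      boolean isLeafBlock = (code & 1) != 0; // low bit: block holds only terms
      int numSuffixBytes = code >>> 1;       // high bits: blob length in bytes
      byte[] suffixBytes = new byte[numSuffixBytes];
      termsIn.readBytes(suffixBytes, 0, numSuffixBytes);
      return suffixBytes;
    }
  }

The stats and metadata blobs use a plain vInt length with no flag bit, as the hunk shows.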

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java Fri Oct 24 02:43:41 2014
@@ -34,8 +34,10 @@ import org.apache.lucene.util.automaton.
 import org.apache.lucene.util.fst.ByteSequenceOutputs;
 import org.apache.lucene.util.fst.FST;
 
-/** BlockTree's implementation of {@link Terms}. */
-// public for CheckIndex:
+/**
+ * BlockTree's implementation of {@link Terms}.
+ * @lucene.internal
+ */
 public final class FieldReader extends Terms implements Accountable {
 
   private static final long BASE_RAM_BYTES_USED =
@@ -77,7 +79,7 @@ public final class FieldReader extends T
     //   System.out.println("BTTR: seg=" + segment + " field=" + fieldInfo.name + " rootBlockCode=" + rootCode + " divisor=" + indexDivisor);
     // }
 
-    rootBlockFP = (new ByteArrayDataInput(rootCode.bytes, rootCode.offset, rootCode.length)).readVLong() >>> BlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS;
+    rootBlockFP = (new ByteArrayDataInput(rootCode.bytes, rootCode.offset, rootCode.length)).readVLong() >>> BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS;
 
     if (indexIn != null) {
       final IndexInput clone = indexIn.clone();
@@ -120,8 +122,8 @@ public final class FieldReader extends T
   }
 
   /** For debugging -- used by CheckIndex too */
-  // TODO: maybe push this into Terms?
-  public Stats computeStats() throws IOException {
+  @Override
+  public Stats getStats() throws IOException {
     return new SegmentTermsEnum(this).computeBlockStats();
   }
 

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java Fri Oct 24 02:43:41 2014
@@ -67,7 +67,7 @@ final class IntersectTermsEnum extends T
     this.fr = fr;
     runAutomaton = compiled.runAutomaton;
     compiledAutomaton = compiled;
-    in = fr.parent.in.clone();
+    in = fr.parent.termsIn.clone();
     stack = new IntersectTermsEnumFrame[5];
     for(int idx=0;idx<stack.length;idx++) {
       stack[idx] = new IntersectTermsEnumFrame(this, idx);

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java Fri Oct 24 02:43:41 2014
@@ -145,7 +145,7 @@ final class IntersectTermsEnumFrame {
       // Skip first long -- has redundant fp, hasTerms
       // flag, isFloor flag
       final long code = floorDataReader.readVLong();
-      if ((code & BlockTreeTermsWriter.OUTPUT_FLAG_IS_FLOOR) != 0) {
+      if ((code & BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR) != 0) {
         numFollowFloorBlocks = floorDataReader.readVInt();
         nextFloorLabel = floorDataReader.readByte() & 0xff;
         // if (DEBUG) System.out.println("    numFollowFloorBlocks=" + numFollowFloorBlocks + " nextFloorLabel=" + nextFloorLabel);

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java Fri Oct 24 02:43:41 2014
@@ -113,7 +113,7 @@ final class SegmentTermsEnum extends Ter
   // Not private to avoid synthetic access$NNN methods
   void initIndexInput() {
     if (this.in == null) {
-      this.in = fr.parent.in.clone();
+      this.in = fr.parent.termsIn.clone();
     }
   }
 
@@ -238,11 +238,11 @@ final class SegmentTermsEnum extends Ter
   SegmentTermsEnumFrame pushFrame(FST.Arc<BytesRef> arc, BytesRef frameData, int length) throws IOException {
     scratchReader.reset(frameData.bytes, frameData.offset, frameData.length);
     final long code = scratchReader.readVLong();
-    final long fpSeek = code >>> BlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS;
+    final long fpSeek = code >>> BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS;
     final SegmentTermsEnumFrame f = getFrame(1+currentFrame.ord);
-    f.hasTerms = (code & BlockTreeTermsWriter.OUTPUT_FLAG_HAS_TERMS) != 0;
+    f.hasTerms = (code & BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS) != 0;
     f.hasTermsOrig = f.hasTerms;
-    f.isFloor = (code & BlockTreeTermsWriter.OUTPUT_FLAG_IS_FLOOR) != 0;
+    f.isFloor = (code & BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR) != 0;
     if (f.isFloor) {
       f.setFloorData(scratchReader, frameData);
     }
@@ -370,8 +370,8 @@ final class SegmentTermsEnum extends Ter
         }
         arc = arcs[1+targetUpto];
         assert arc.label == (target.bytes[target.offset + targetUpto] & 0xFF): "arc.label=" + (char) arc.label + " targetLabel=" + (char) (target.bytes[target.offset + targetUpto] & 0xFF);
-        if (arc.output != BlockTreeTermsWriter.NO_OUTPUT) {
-          output = BlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.output);
+        if (arc.output != BlockTreeTermsReader.NO_OUTPUT) {
+          output = BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output);
         }
         if (arc.isFinal()) {
           lastFrame = stack[1+lastFrame.ord];
@@ -461,7 +461,7 @@ final class SegmentTermsEnum extends Ter
 
       //term.length = 0;
       targetUpto = 0;
-      currentFrame = pushFrame(arc, BlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput), 0);
+      currentFrame = pushFrame(arc, BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput), 0);
     }
 
     // if (DEBUG) {
@@ -517,8 +517,8 @@ final class SegmentTermsEnum extends Ter
         term.setByteAt(targetUpto, (byte) targetLabel);
         // Aggregate output as we go:
         assert arc.output != null;
-        if (arc.output != BlockTreeTermsWriter.NO_OUTPUT) {
-          output = BlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.output);
+        if (arc.output != BlockTreeTermsReader.NO_OUTPUT) {
+          output = BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output);
         }
 
         // if (DEBUG) {
@@ -528,7 +528,7 @@ final class SegmentTermsEnum extends Ter
 
         if (arc.isFinal()) {
           //if (DEBUG) System.out.println("    arc is final!");
-          currentFrame = pushFrame(arc, BlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput), targetUpto);
+          currentFrame = pushFrame(arc, BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput), targetUpto);
           //if (DEBUG) System.out.println("    curFrame.ord=" + currentFrame.ord + " hasTerms=" + currentFrame.hasTerms);
         }
       }
@@ -612,7 +612,7 @@ final class SegmentTermsEnum extends Ter
 
       int cmp = 0;
 
-      // TOOD: we should write our vLong backwards (MSB
+      // TODO: we should write our vLong backwards (MSB
       // first) to get better sharing from the FST
 
       // First compare up to valid seek frames:
@@ -626,13 +626,13 @@ final class SegmentTermsEnum extends Ter
         }
         arc = arcs[1+targetUpto];
         assert arc.label == (target.bytes[target.offset + targetUpto] & 0xFF): "arc.label=" + (char) arc.label + " targetLabel=" + (char) (target.bytes[target.offset + targetUpto] & 0xFF);
-        // TOOD: we could save the outputs in local
+        // TODO: we could save the outputs in local
        // byte[][] instead of making new objs every
         // seek; but, often the FST doesn't have any
         // shared bytes (but this could change if we
         // reverse vLong byte order)
-        if (arc.output != BlockTreeTermsWriter.NO_OUTPUT) {
-          output = BlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.output);
+        if (arc.output != BlockTreeTermsReader.NO_OUTPUT) {
+          output = BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output);
         }
         if (arc.isFinal()) {
           lastFrame = stack[1+lastFrame.ord];
@@ -717,7 +717,7 @@ final class SegmentTermsEnum extends Ter
 
       //term.length = 0;
       targetUpto = 0;
-      currentFrame = pushFrame(arc, BlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput), 0);
+      currentFrame = pushFrame(arc, BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput), 0);
     }
 
     //if (DEBUG) {
@@ -773,8 +773,8 @@ final class SegmentTermsEnum extends Ter
         arc = nextArc;
         // Aggregate output as we go:
         assert arc.output != null;
-        if (arc.output != BlockTreeTermsWriter.NO_OUTPUT) {
-          output = BlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.output);
+        if (arc.output != BlockTreeTermsReader.NO_OUTPUT) {
+          output = BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output);
         }
 
         //if (DEBUG) {
@@ -784,7 +784,7 @@ final class SegmentTermsEnum extends Ter
 
         if (arc.isFinal()) {
           //if (DEBUG) System.out.println("    arc is final!");
-          currentFrame = pushFrame(arc, BlockTreeTermsWriter.FST_OUTPUTS.add(output, arc.nextFinalOutput), targetUpto);
+          currentFrame = pushFrame(arc, BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput), targetUpto);
           //if (DEBUG) System.out.println("    curFrame.ord=" + currentFrame.ord + " hasTerms=" + currentFrame.hasTerms);
         }
       }
@@ -831,9 +831,9 @@ final class SegmentTermsEnum extends Ter
         assert f != null;
         final BytesRef prefix = new BytesRef(term.get().bytes, 0, f.prefix);
         if (f.nextEnt == -1) {
-          out.println("    frame " + (isSeekFrame ? "(seek)" : "(next)") + " ord=" + ord + " fp=" + f.fp + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "") + " prefixLen=" + f.prefix + " prefix=" + prefix + (f.nextEnt == -1 ? "" : (" (of " + f.entCount + ")")) + " hasTerms=" + f.hasTerms + " isFloor=" + f.isFloor + " code=" + ((f.fp<<BlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS) + (f.hasTerms ? BlockTreeTermsWriter.OUTPUT_FLAG_HAS_TERMS:0) + (f.isFloor ? BlockTreeTermsWriter.OUTPUT_FLAG_IS_FLOOR:0)) + " isLastInFloor=" + f.isLastInFloor + " mdUpto=" + f.metaDataUpto + " tbOrd=" + f.getTermBlockOrd());
+          out.println("    frame " + (isSeekFrame ? "(seek)" : "(next)") + " ord=" + ord + " fp=" + f.fp + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "") + " prefixLen=" + f.prefix + " prefix=" + prefix + (f.nextEnt == -1 ? "" : (" (of " + f.entCount + ")")) + " hasTerms=" + f.hasTerms + " isFloor=" + f.isFloor + " code=" + ((f.fp<< BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) + (f.hasTerms ? BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS:0) + (f.isFloor ? BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR:0)) + " isLastInFloor=" + f.isLastInFloor + " mdUpto=" + f.metaDataUpto + " tbOrd=" + f.getTermBlockOrd());
         } else {
-          out.println("    frame " + (isSeekFrame ? "(seek, loaded)" : "(next, loaded)") + " ord=" + ord + " fp=" + f.fp + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "") + " prefixLen=" + f.prefix + " prefix=" + prefix + " nextEnt=" + f.nextEnt + (f.nextEnt == -1 ? "" : (" (of " + f.entCount + ")")) + " hasTerms=" + f.hasTerms + " isFloor=" + f.isFloor + " code=" + ((f.fp<<BlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS) + (f.hasTerms ? BlockTreeTermsWriter.OUTPUT_FLAG_HAS_TERMS:0) + (f.isFloor ? BlockTreeTermsWriter.OUTPUT_FLAG_IS_FLOOR:0)) + " lastSubFP=" + f.lastSubFP + " isLastInFloor=" + f.isLastInFloor + " mdUpto=" + f.metaDataUpto + " tbOrd=" + f.getTermBlockOrd());
+          out.println("    frame " + (isSeekFrame ? "(seek, loaded)" : "(next, loaded)") + " ord=" + ord + " fp=" + f.fp + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "") + " prefixLen=" + f.prefix + " prefix=" + prefix + " nextEnt=" + f.nextEnt + (f.nextEnt == -1 ? "" : (" (of " + f.entCount + ")")) + " hasTerms=" + f.hasTerms + " isFloor=" + f.isFloor + " code=" + ((f.fp<< BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) + (f.hasTerms ? BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS:0) + (f.isFloor ? BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR:0)) + " lastSubFP=" + f.lastSubFP + " isLastInFloor=" + f.isLastInFloor + " mdUpto=" + f.metaDataUpto + " tbOrd=" + f.getTermBlockOrd());
         }
         if (fr.index != null) {
           assert !isSeekFrame || f.arc != null: "isSeekFrame=" + isSeekFrame + " f.arc=" + f.arc;
@@ -848,7 +848,7 @@ final class SegmentTermsEnum extends Ter
           } else if (isSeekFrame && !f.isFloor) {
             final ByteArrayDataInput reader = new ByteArrayDataInput(output.bytes, output.offset, output.length);
             final long codeOrig = reader.readVLong();
-            final long code = (f.fp << BlockTreeTermsWriter.OUTPUT_FLAGS_NUM_BITS) | (f.hasTerms ? BlockTreeTermsWriter.OUTPUT_FLAG_HAS_TERMS:0) | (f.isFloor ? BlockTreeTermsWriter.OUTPUT_FLAG_IS_FLOOR:0);
+            final long code = (f.fp << BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) | (f.hasTerms ? BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS:0) | (f.isFloor ? BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR:0);
             if (codeOrig != code) {
               out.println("      broken seek state: output code=" + codeOrig + " doesn't match frame code=" + code);
               throw new RuntimeException("seek state is broken");
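
The SegmentTermsEnum hunks above move the output-flag constants from the writer class to BlockTreeTermsReader without touching the encoding: a block's file pointer plus two flag bits travel in a single FST output vLong. A sketch of the pack/unpack, assuming the constants keep their usual values (two flag bits, HAS_TERMS = 0x2, IS_FLOOR = 0x1); verify against BlockTreeTermsReader in this revision:

  final class OutputFlags {
    // Assumed values, mirroring BlockTreeTermsReader's constants:
    static final int OUTPUT_FLAGS_NUM_BITS = 2;
    static final int OUTPUT_FLAG_IS_FLOOR  = 0x1;
    static final int OUTPUT_FLAG_HAS_TERMS = 0x2;

    // Pack: what the writer stores as the FST output for a block.
    static long encode(long fp, boolean hasTerms, boolean isFloor) {
      return (fp << OUTPUT_FLAGS_NUM_BITS)
          | (hasTerms ? OUTPUT_FLAG_HAS_TERMS : 0)
          | (isFloor  ? OUTPUT_FLAG_IS_FLOOR  : 0);
    }

    // Unpack: what pushFrame() above recovers from the code.
    static long fp(long code)          { return code >>> OUTPUT_FLAGS_NUM_BITS; }
    static boolean hasTerms(long code) { return (code & OUTPUT_FLAG_HAS_TERMS) != 0; }
    static boolean isFloor(long code)  { return (code & OUTPUT_FLAG_IS_FLOOR) != 0; }
  }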

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/Stats.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/Stats.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/Stats.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/Stats.java Fri Oct 24 02:43:41 2014
@@ -22,14 +22,15 @@ import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
 import java.util.Locale;
 
-import org.apache.lucene.codecs.PostingsBaseFormat;
+import org.apache.lucene.codecs.PostingsReaderBase;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
 /**
  * BlockTree statistics for a single field 
- * returned by {@link FieldReader#computeStats()}.
+ * returned by {@link FieldReader#getStats()}.
+ * @lucene.internal
  */
 public class Stats {
   /** How many nodes in the index FST. */
@@ -81,11 +82,11 @@ public class Stats {
   public long totalBlockSuffixBytes;
 
   /** Total number of bytes used to store term stats (not
-   *  including what the {@link PostingsBaseFormat}
+   *  including what the {@link PostingsReaderBase}
    *  stores). */
   public long totalBlockStatsBytes;
 
-  /** Total bytes stored by the {@link PostingsBaseFormat},
+  /** Total bytes stored by the {@link PostingsReaderBase},
    *  plus the other few vInts stored in the frame. */
   public long totalBlockOtherBytes;
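
With computeStats() renamed to getStats() in the FieldReader hunk earlier, these per-field counters stay reachable from application code. A hedged usage sketch, limited to the fields visible in this hunk; obtaining the Terms instance and the instanceof guard are our assumptions, not part of the API:

  import java.io.IOException;

  import org.apache.lucene.codecs.blocktree.FieldReader;
  import org.apache.lucene.codecs.blocktree.Stats;
  import org.apache.lucene.index.Terms;

  final class BlockTreeStatsDemo {
    // Prints a few byte counters for one field's terms dictionary.
    static void print(Terms terms) throws IOException {
      if (terms instanceof FieldReader) { // only block tree fields qualify
        Stats stats = ((FieldReader) terms).getStats();
        System.out.println("suffix bytes: " + stats.totalBlockSuffixBytes);
        System.out.println("stats bytes:  " + stats.totalBlockStatsBytes);
        System.out.println("other bytes:  " + stats.totalBlockOtherBytes);
      }
    }
  }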
 

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/package.html
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/package.html?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/package.html (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/blocktree/package.html Fri Oct 24 02:43:41 2014
@@ -27,7 +27,7 @@ This terms dictionary organizes all term
 shared prefix, such that each block has enough terms, and then stores
 the prefix trie in memory as an FST as the index structure.  It allows
 you to plug in your own {@link
-org.apache.lucene.codecs.PostingsBaseFormat} to implement the
+org.apache.lucene.codecs.PostingsWriterBase} to implement the
 postings.
 </p>
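
Since the package doc now names PostingsWriterBase as the plug-in point, a sketch of the usual wiring may help; it is modeled on how Lucene's own postings formats build their FieldsConsumer. Treat the Lucene50PostingsWriter constructor and the 25/48 block-size bounds as assumptions to verify against this revision:

  import java.io.IOException;

  import org.apache.lucene.codecs.FieldsConsumer;
  import org.apache.lucene.codecs.PostingsWriterBase;
  import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
  import org.apache.lucene.codecs.lucene50.Lucene50PostingsWriter;
  import org.apache.lucene.index.SegmentWriteState;
  import org.apache.lucene.util.IOUtils;

  final class MyPostingsWiring {
    // Sketch of a PostingsFormat.fieldsConsumer() body: once construction
    // succeeds, the terms dictionary owns (and will close) the postings writer.
    static FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
      PostingsWriterBase postingsWriter = new Lucene50PostingsWriter(state);
      boolean success = false;
      try {
        FieldsConsumer ret = new BlockTreeTermsWriter(state, postingsWriter, 25, 48);
        success = true;
        return ret;
      } finally {
        if (!success) {
          IOUtils.closeWhileHandlingException(postingsWriter); // don't leak on failure
        }
      }
    }
  }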
 

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsFormat.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsFormat.java Fri Oct 24 02:43:41 2014
@@ -64,7 +64,7 @@ public class CompressingStoredFieldsForm
    * <p>
    * <code>formatName</code> is the name of the format. This name will be used
    * in the file formats to perform
-   * {@link CodecUtil#checkSegmentHeader codec header checks}.
+   * {@link CodecUtil#checkIndexHeader codec header checks}.
    * <p>
    * <code>segmentSuffix</code> is the segment suffix. This suffix is added to 
    * the result file name only if it's not the empty string.
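
The write/check pairing behind this renamed javadoc is symmetric across the commit: whatever codec name, segment id, and suffix go into writeIndexHeader must come back through checkIndexHeader, which returns the version that was written. A compact sketch (the version bounds are illustrative, not this format's real constants):

  import java.io.IOException;

  import org.apache.lucene.codecs.CodecUtil;
  import org.apache.lucene.store.IndexInput;
  import org.apache.lucene.store.IndexOutput;

  final class HeaderRoundTrip {
    static final int VERSION_START = 0;   // illustrative bounds only
    static final int VERSION_CURRENT = 0;

    static void write(IndexOutput out, String codec, byte[] segmentId, String suffix) throws IOException {
      CodecUtil.writeIndexHeader(out, codec, VERSION_CURRENT, segmentId, suffix);
    }

    static int check(IndexInput in, String codec, byte[] segmentId, String suffix) throws IOException {
      // Throws CorruptIndexException on any mismatch; returns the written version.
      return CodecUtil.checkIndexHeader(in, codec, VERSION_START, VERSION_CURRENT, segmentId, suffix);
    }
  }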

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java Fri Oct 24 02:43:41 2014
@@ -118,8 +118,8 @@ public final class CompressingStoredFiel
       Throwable priorE = null;
       try {
         final String codecNameIdx = formatName + CODEC_SFX_IDX;
-        version = CodecUtil.checkSegmentHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT, si.getId(), segmentSuffix);
-        assert CodecUtil.segmentHeaderLength(codecNameIdx, segmentSuffix) == indexStream.getFilePointer();
+        version = CodecUtil.checkIndexHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT, si.getId(), segmentSuffix);
+        assert CodecUtil.indexHeaderLength(codecNameIdx, segmentSuffix) == indexStream.getFilePointer();
         indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
         maxPointer = indexStream.readVLong();
       } catch (Throwable exception) {
@@ -141,11 +141,11 @@ public final class CompressingStoredFiel
         throw new CorruptIndexException("Invalid fieldsStream maxPointer (file truncated?): maxPointer=" + maxPointer + ", length=" + fieldsStream.length(), fieldsStream);
       }
       final String codecNameDat = formatName + CODEC_SFX_DAT;
-      final int fieldsVersion = CodecUtil.checkSegmentHeader(fieldsStream, codecNameDat, VERSION_START, VERSION_CURRENT, si.getId(), segmentSuffix);
+      final int fieldsVersion = CodecUtil.checkIndexHeader(fieldsStream, codecNameDat, VERSION_START, VERSION_CURRENT, si.getId(), segmentSuffix);
       if (version != fieldsVersion) {
         throw new CorruptIndexException("Version mismatch between stored fields index and data: " + version + " != " + fieldsVersion, fieldsStream);
       }
-      assert CodecUtil.segmentHeaderLength(codecNameDat, segmentSuffix) == fieldsStream.getFilePointer();
+      assert CodecUtil.indexHeaderLength(codecNameDat, segmentSuffix) == fieldsStream.getFilePointer();
 
       chunkSize = fieldsStream.readVInt();
       packedIntsVersion = fieldsStream.readVInt();

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java Fri Oct 24 02:43:41 2014
@@ -29,13 +29,9 @@ import org.apache.lucene.index.CorruptIn
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.SegmentReader;
 import org.apache.lucene.index.StorableField;
-import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -118,10 +114,10 @@ public final class CompressingStoredFiel
 
       final String codecNameIdx = formatName + CODEC_SFX_IDX;
       final String codecNameDat = formatName + CODEC_SFX_DAT;
-      CodecUtil.writeSegmentHeader(indexStream, codecNameIdx, VERSION_CURRENT, si.getId(), segmentSuffix);
-      CodecUtil.writeSegmentHeader(fieldsStream, codecNameDat, VERSION_CURRENT, si.getId(), segmentSuffix);
-      assert CodecUtil.segmentHeaderLength(codecNameDat, segmentSuffix) == fieldsStream.getFilePointer();
-      assert CodecUtil.segmentHeaderLength(codecNameIdx, segmentSuffix) == indexStream.getFilePointer();
+      CodecUtil.writeIndexHeader(indexStream, codecNameIdx, VERSION_CURRENT, si.getId(), segmentSuffix);
+      CodecUtil.writeIndexHeader(fieldsStream, codecNameDat, VERSION_CURRENT, si.getId(), segmentSuffix);
+      assert CodecUtil.indexHeaderLength(codecNameDat, segmentSuffix) == fieldsStream.getFilePointer();
+      assert CodecUtil.indexHeaderLength(codecNameIdx, segmentSuffix) == indexStream.getFilePointer();
 
       indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
       indexStream = null;

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsFormat.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsFormat.java Fri Oct 24 02:43:41 2014
@@ -46,7 +46,7 @@ public class CompressingTermVectorsForma
    * <p>
    * <code>formatName</code> is the name of the format. This name will be used
    * in the file formats to perform
-   * {@link CodecUtil#checkSegmentHeader codec header checks}.
+   * {@link CodecUtil#checkIndexHeader codec header checks}.
    * <p>
    * The <code>compressionMode</code> parameter allows you to choose between
    * compression algorithms that have various compression and decompression

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java Fri Oct 24 02:43:41 2014
@@ -114,8 +114,8 @@ public final class CompressingTermVector
       Throwable priorE = null;
       try {
         final String codecNameIdx = formatName + CODEC_SFX_IDX;
-        version = CodecUtil.checkSegmentHeader(input, codecNameIdx, VERSION_START, VERSION_CURRENT, si.getId(), segmentSuffix);
-        assert CodecUtil.segmentHeaderLength(codecNameIdx, segmentSuffix) == input.getFilePointer();
+        version = CodecUtil.checkIndexHeader(input, codecNameIdx, VERSION_START, VERSION_CURRENT, si.getId(), segmentSuffix);
+        assert CodecUtil.indexHeaderLength(codecNameIdx, segmentSuffix) == input.getFilePointer();
         indexReader = new CompressingStoredFieldsIndexReader(input, si);
         input.readVLong(); // the end of the data file
       } catch (Throwable exception) {
@@ -133,11 +133,11 @@ public final class CompressingTermVector
       final String vectorsStreamFN = IndexFileNames.segmentFileName(segment, segmentSuffix, VECTORS_EXTENSION);
       vectorsStream = d.openInput(vectorsStreamFN, context);
       final String codecNameDat = formatName + CODEC_SFX_DAT;
-      int version2 = CodecUtil.checkSegmentHeader(vectorsStream, codecNameDat, VERSION_START, VERSION_CURRENT, si.getId(), segmentSuffix);
+      int version2 = CodecUtil.checkIndexHeader(vectorsStream, codecNameDat, VERSION_START, VERSION_CURRENT, si.getId(), segmentSuffix);
       if (version != version2) {
         throw new CorruptIndexException("Version mismatch between stored fields index and data: " + version + " != " + version2, vectorsStream);
       }
-      assert CodecUtil.segmentHeaderLength(codecNameDat, segmentSuffix) == vectorsStream.getFilePointer();
+      assert CodecUtil.indexHeaderLength(codecNameDat, segmentSuffix) == vectorsStream.getFilePointer();
       
       long pos = vectorsStream.getFilePointer();
       // NOTE: data file is too costly to verify checksum against all the bytes on open,

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java Fri Oct 24 02:43:41 2014
@@ -32,11 +32,8 @@ import org.apache.lucene.index.FieldInfo
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.SegmentReader;
 import org.apache.lucene.store.BufferedChecksumIndexInput;
 import org.apache.lucene.store.ChecksumIndexInput;
 import org.apache.lucene.store.DataInput;
@@ -231,10 +228,10 @@ public final class CompressingTermVector
 
       final String codecNameIdx = formatName + CODEC_SFX_IDX;
       final String codecNameDat = formatName + CODEC_SFX_DAT;
-      CodecUtil.writeSegmentHeader(indexStream, codecNameIdx, VERSION_CURRENT, si.getId(), segmentSuffix);
-      CodecUtil.writeSegmentHeader(vectorsStream, codecNameDat, VERSION_CURRENT, si.getId(), segmentSuffix);
-      assert CodecUtil.segmentHeaderLength(codecNameDat, segmentSuffix) == vectorsStream.getFilePointer();
-      assert CodecUtil.segmentHeaderLength(codecNameIdx, segmentSuffix) == indexStream.getFilePointer();
+      CodecUtil.writeIndexHeader(indexStream, codecNameIdx, VERSION_CURRENT, si.getId(), segmentSuffix);
+      CodecUtil.writeIndexHeader(vectorsStream, codecNameDat, VERSION_CURRENT, si.getId(), segmentSuffix);
+      assert CodecUtil.indexHeaderLength(codecNameDat, segmentSuffix) == vectorsStream.getFilePointer();
+      assert CodecUtil.indexHeaderLength(codecNameIdx, segmentSuffix) == indexStream.getFilePointer();
 
       indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
       indexStream = null;

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java Fri Oct 24 02:43:41 2014
@@ -106,7 +106,7 @@ public class Lucene50Codec extends Codec
   /** Returns the postings format that should be used for writing 
    *  new segments of <code>field</code>.
    *  
-   *  The default implementation always returns "Lucene41"
+   *  The default implementation always returns "Lucene50"
    */
   public PostingsFormat getPostingsFormatForField(String field) {
     return defaultFormat;
@@ -115,7 +115,7 @@ public class Lucene50Codec extends Codec
   /** Returns the docvalues format that should be used for writing 
    *  new segments of <code>field</code>.
    *  
-   *  The default implementation always returns "Lucene410"
+   *  The default implementation always returns "Lucene50"
    */
   public DocValuesFormat getDocValuesFormatForField(String field) {
     return defaultDVFormat;
@@ -126,8 +126,8 @@ public class Lucene50Codec extends Codec
     return docValuesFormat;
   }
 
-  private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene41");
-  private final DocValuesFormat defaultDVFormat = DocValuesFormat.forName("Lucene410");
+  private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50");
+  private final DocValuesFormat defaultDVFormat = DocValuesFormat.forName("Lucene50");
 
   private final NormsFormat normsFormat = new Lucene50NormsFormat();
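
Because getPostingsFormatForField() and getDocValuesFormatForField() are the documented override points, per-field format selection is a small subclass away. A sketch; the "Memory" format and the "id" field are examples only, and the codec is installed via IndexWriterConfig.setCodec(...):

  import org.apache.lucene.codecs.Codec;
  import org.apache.lucene.codecs.PostingsFormat;
  import org.apache.lucene.codecs.lucene50.Lucene50Codec;

  final class PerFieldCodecDemo {
    static Codec perFieldCodec() {
      return new Lucene50Codec() {
        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
          if ("id".equals(field)) {
            return PostingsFormat.forName("Memory"); // example alternate format
          }
          return super.getPostingsFormatForField(field); // default: "Lucene50"
        }
      };
    }
  }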
 

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java?rev=1633991&r1=1633990&r2=1633991&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java Fri Oct 24 02:43:41 2014
@@ -46,7 +46,7 @@ import org.apache.lucene.store.IndexOutp
  *   <li>Compound (.cfs) --&gt; Header, FileData <sup>FileCount</sup>, Footer</li>
  *   <li>Compound Entry Table (.cfe) --&gt; Header, FileCount, &lt;FileName,
  *       DataOffset, DataLength&gt; <sup>FileCount</sup></li>
- *   <li>Header --&gt; {@link CodecUtil#writeSegmentHeader SegmentHeader}</li>
+ *   <li>Header --&gt; {@link CodecUtil#writeIndexHeader IndexHeader}</li>
  *   <li>FileCount --&gt; {@link DataOutput#writeVInt VInt}</li>
  *   <li>DataOffset,DataLength,Checksum --&gt; {@link DataOutput#writeLong UInt64}</li>
  *   <li>FileName --&gt; {@link DataOutput#writeString String}</li>
@@ -79,8 +79,8 @@ public final class Lucene50CompoundForma
     
     try (IndexOutput data =    dir.createOutput(dataFile, context);
          IndexOutput entries = dir.createOutput(entriesFile, context)) {
-      CodecUtil.writeSegmentHeader(data,    DATA_CODEC, VERSION_CURRENT, si.getId(), "");
-      CodecUtil.writeSegmentHeader(entries, ENTRY_CODEC, VERSION_CURRENT, si.getId(), "");
+      CodecUtil.writeIndexHeader(data,    DATA_CODEC, VERSION_CURRENT, si.getId(), "");
+      CodecUtil.writeIndexHeader(entries, ENTRY_CODEC, VERSION_CURRENT, si.getId(), "");
       
       // write number of files
       entries.writeVInt(files.size());