Posted to commits@lucene.apache.org by ab...@apache.org on 2017/03/01 09:27:46 UTC

[38/50] [abbrv] lucene-solr:jira/solr-9858: LUCENE-7709: Remove unused backward compatibility logic.

LUCENE-7709: Remove unused backward compatibility logic.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/c7fd1437
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/c7fd1437
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/c7fd1437

Branch: refs/heads/jira/solr-9858
Commit: c7fd1437706a21d0571c5fced2e2e734563fa895
Parents: d9c0f25
Author: Adrien Grand <jp...@gmail.com>
Authored: Tue Feb 28 13:38:04 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Tue Feb 28 13:38:04 2017 +0100

----------------------------------------------------------------------
 .../codecs/blocktree/BlockTreeTermsReader.java  |  30 +---
 .../blocktree/IntersectTermsEnumFrame.java      |  70 ++-------
 .../codecs/blocktree/SegmentTermsEnumFrame.java | 154 ++++---------------
 .../CompressingStoredFieldsReader.java          |  19 +--
 .../CompressingStoredFieldsWriter.java          |   5 +-
 .../CompressingTermVectorsReader.java           |  19 +--
 .../CompressingTermVectorsWriter.java           |   5 +-
 7 files changed, 67 insertions(+), 235 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c7fd1437/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
index 6fc9a24..8d31f18 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
@@ -98,14 +98,7 @@ public final class BlockTreeTermsReader extends FieldsProducer {
   final static String TERMS_CODEC_NAME = "BlockTreeTermsDict";
 
   /** Initial terms format. */
-  public static final int VERSION_START = 0;
-
-  /** Auto-prefix terms. */
-  public static final int VERSION_AUTO_PREFIX_TERMS = 1;
-
-  /** Conditional auto-prefix terms: we record at write time whether
-   *  this field did write any auto-prefix terms. */
-  public static final int VERSION_AUTO_PREFIX_TERMS_COND = 2;
+  public static final int VERSION_START = 2;
 
   /** Auto-prefix terms have been superseded by points. */
   public static final int VERSION_AUTO_PREFIX_TERMS_REMOVED = 3;
@@ -138,8 +131,6 @@ public final class BlockTreeTermsReader extends FieldsProducer {
   
   final int version;
 
-  final boolean anyAutoPrefixTerms;
-
   /** Sole constructor. */
   public BlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state) throws IOException {
     boolean success = false;
@@ -153,22 +144,11 @@ public final class BlockTreeTermsReader extends FieldsProducer {
       termsIn = state.directory.openInput(termsName, state.context);
       version = CodecUtil.checkIndexHeader(termsIn, TERMS_CODEC_NAME, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
 
-      if (version < VERSION_AUTO_PREFIX_TERMS || version >= VERSION_AUTO_PREFIX_TERMS_REMOVED) {
-        // Old (pre-5.2.0) or recent (6.2.0+) index, no auto-prefix terms:
-        this.anyAutoPrefixTerms = false;
-      } else if (version == VERSION_AUTO_PREFIX_TERMS) {
-        // 5.2.x index, might have auto-prefix terms:
-        this.anyAutoPrefixTerms = true;
-      } else {
-        // 5.3.x index, we record up front if we may have written any auto-prefix terms:
-        assert version == VERSION_AUTO_PREFIX_TERMS_COND;
+      if (version < VERSION_AUTO_PREFIX_TERMS_REMOVED) {
+        // pre-6.2 index, records whether auto-prefix terms are enabled in the header
         byte b = termsIn.readByte();
-        if (b == 0) {
-          this.anyAutoPrefixTerms = false;
-        } else if (b == 1) {
-          this.anyAutoPrefixTerms = true;
-        } else {
-          throw new CorruptIndexException("invalid anyAutoPrefixTerms: expected 0 or 1 but got " + b, termsIn);
+        if (b != 0) {
+          throw new CorruptIndexException("Index header pretends the index has auto-prefix terms: " + b, termsIn);
         }
       }
 

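With VERSION_START raised to 2, the only compatibility handling left in this header check is the one-byte auto-prefix marker that pre-6.2 writers put in the terms dictionary header; now that auto-prefix terms are gone it must always be zero. A minimal standalone sketch of that check, using hypothetical names rather than the actual Lucene reader:

    // Sketch only: format versions older than VERSION_AUTO_PREFIX_TERMS_REMOVED (3)
    // still carry a one-byte auto-prefix marker. Auto-prefix terms no longer
    // exist, so any non-zero value means the index is corrupt or unsupported.
    static void checkAutoPrefixMarker(int version, java.io.DataInput in) throws java.io.IOException {
      final int VERSION_AUTO_PREFIX_TERMS_REMOVED = 3;
      if (version < VERSION_AUTO_PREFIX_TERMS_REMOVED) {
        byte marker = in.readByte();
        if (marker != 0) {
          throw new java.io.IOException("index claims to contain auto-prefix terms: " + marker);
        }
      }
    }
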
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c7fd1437/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java
index 3241075..578e145 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnumFrame.java
@@ -77,8 +77,6 @@ final class IntersectTermsEnumFrame {
   int transitionIndex;
   int transitionCount;
 
-  final boolean versionAutoPrefix;
-
   FST.Arc<BytesRef> arc;
 
   final BlockTermState termState;
@@ -116,7 +114,6 @@ final class IntersectTermsEnumFrame {
     this.termState = ite.fr.parent.postingsReader.newTermState();
     this.termState.totalTermFreq = -1;
     this.longs = new long[ite.fr.longsSize];
-    this.versionAutoPrefix = ite.fr.parent.anyAutoPrefixTerms;
   }
 
   void loadNextFloorBlock() throws IOException {
@@ -252,64 +249,17 @@ final class IntersectTermsEnumFrame {
     assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
     nextEnt++;
     final int code = suffixesReader.readVInt();
-    if (versionAutoPrefix == false) {
-      suffix = code >>> 1;
-      startBytePos = suffixesReader.getPosition();
-      suffixesReader.skipBytes(suffix);
-      if ((code & 1) == 0) {
-        // A normal term
-        termState.termBlockOrd++;
-        return false;
-      } else {
-        // A sub-block; make sub-FP absolute:
-        lastSubFP = fp - suffixesReader.readVLong();
-        return true;
-      }
+    suffix = code >>> 1;
+    startBytePos = suffixesReader.getPosition();
+    suffixesReader.skipBytes(suffix);
+    if ((code & 1) == 0) {
+      // A normal term
+      termState.termBlockOrd++;
+      return false;
     } else {
-      suffix = code >>> 2;
-      startBytePos = suffixesReader.getPosition();
-      suffixesReader.skipBytes(suffix);
-      switch (code & 3) {
-      case 0:
-        // A normal term
-        isAutoPrefixTerm = false;
-        termState.termBlockOrd++;
-        return false;
-      case 1:
-        // A sub-block; make sub-FP absolute:
-        isAutoPrefixTerm = false;
-        lastSubFP = fp - suffixesReader.readVLong();
-        return true;
-      case 2:
-        // A normal prefix term, suffix leads with empty string
-        floorSuffixLeadStart = -1;
-        termState.termBlockOrd++;
-        floorSuffixLeadEnd = suffixesReader.readByte() & 0xff;
-        if (floorSuffixLeadEnd == 0xff) {
-          floorSuffixLeadEnd = -1;
-        }
-        isAutoPrefixTerm = true;
-        return false;
-      case 3:
-        // A floor'd prefix term, suffix leads with real byte
-        if (suffix == 0) {
-          // TODO: this is messy, but necessary because we are an auto-prefix term, but our suffix is the empty string here, so we have to
-          // look at the parent block to get the lead suffix byte:
-          assert ord > 0;
-          IntersectTermsEnumFrame parent = ite.stack[ord-1];
-          floorSuffixLeadStart = parent.suffixBytes[parent.startBytePos+parent.suffix-1] & 0xff;
-        } else {
-          floorSuffixLeadStart = suffixBytes[startBytePos+suffix-1] & 0xff;
-        }
-        termState.termBlockOrd++;
-        isAutoPrefixTerm = true;
-        floorSuffixLeadEnd = suffixesReader.readByte() & 0xff;
-        return false;
-      default:
-        // Silly javac:
-        assert false;
-        return false;
-      }
+      // A sub-block; make sub-FP absolute:
+      lastSubFP = fp - suffixesReader.readVLong();
+      return true;
     }
   }
 

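The surviving decode path relies on a one-bit entry code: the low bit of the vint distinguishes a normal term from a sub-block and the remaining bits carry the suffix length, which is why the two-bit auto-prefix variant (code & 3) could be dropped everywhere. A minimal sketch of that packing, with hypothetical names (not the Lucene classes):

    // Sketch only: how a block entry code is packed and unpacked once the
    // auto-prefix cases are gone.
    final class EntryCode {
      static int encode(int suffixLength, boolean isSubBlock) {
        return (suffixLength << 1) | (isSubBlock ? 1 : 0);
      }
      static int suffixLength(int code) {
        return code >>> 1;        // upper bits: suffix length in bytes
      }
      static boolean isSubBlock(int code) {
        return (code & 1) != 0;   // low bit: 0 = normal term, 1 = sub-block
      }
    }
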
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c7fd1437/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java
index a2abbaf..0860b30 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnumFrame.java
@@ -37,8 +37,6 @@ final class SegmentTermsEnumFrame {
 
   FST.Arc<BytesRef> arc;
 
-  final boolean versionAutoPrefix;
-
   //static boolean DEBUG = BlockTreeTermsWriter.DEBUG;
 
   // File pointer where this block was loaded from
@@ -100,7 +98,6 @@ final class SegmentTermsEnumFrame {
     this.state = ste.fr.parent.postingsReader.newTermState();
     this.state.totalTermFreq = -1;
     this.longs = new long[ste.fr.longsSize];
-    this.versionAutoPrefix = ste.fr.parent.anyAutoPrefixTerms;
   }
 
   public void setFloorData(ByteArrayDataInput in, BytesRef source) {
@@ -302,58 +299,26 @@ final class SegmentTermsEnumFrame {
       assert nextEnt != -1 && nextEnt < entCount: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
       nextEnt++;
       final int code = suffixesReader.readVInt();
-      if (versionAutoPrefix == false) {
-        suffix = code >>> 1;
-        startBytePos = suffixesReader.getPosition();
-        ste.term.setLength(prefix + suffix);
-        ste.term.grow(ste.term.length());
-        suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
-        if ((code & 1) == 0) {
-          // A normal term
-          ste.termExists = true;
-          subCode = 0;
-          state.termBlockOrd++;
-          return false;
-        } else {
-          // A sub-block; make sub-FP absolute:
-          ste.termExists = false;
-          subCode = suffixesReader.readVLong();
-          lastSubFP = fp - subCode;
-          //if (DEBUG) {
-          //System.out.println("    lastSubFP=" + lastSubFP);
-          //}
-          return true;
-        }
+      suffix = code >>> 1;
+      startBytePos = suffixesReader.getPosition();
+      ste.term.setLength(prefix + suffix);
+      ste.term.grow(ste.term.length());
+      suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
+      if ((code & 1) == 0) {
+        // A normal term
+        ste.termExists = true;
+        subCode = 0;
+        state.termBlockOrd++;
+        return false;
       } else {
-        suffix = code >>> 2;
-        startBytePos = suffixesReader.getPosition();
-        ste.term.setLength(prefix + suffix);
-        ste.term.grow(ste.term.length());
-        suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
-
-        switch(code & 3) {
-        case 0:
-          // A normal term
-          ste.termExists = true;
-          subCode = 0;
-          state.termBlockOrd++;
-          return false;
-        case 1:
-          // A sub-block; make sub-FP absolute:
-          ste.termExists = false;
-          subCode = suffixesReader.readVLong();
-          lastSubFP = fp - subCode;
-          //if (DEBUG) {
-          //System.out.println("    lastSubFP=" + lastSubFP);
-          //}
-          return true;
-        case 2:
-        case 3:
-          // A prefix term: skip it
-          state.termBlockOrd++;
-          suffixesReader.readByte();
-          continue;
-        }
+        // A sub-block; make sub-FP absolute:
+        ste.termExists = false;
+        subCode = suffixesReader.readVLong();
+        lastSubFP = fp - subCode;
+        //if (DEBUG) {
+        //System.out.println("    lastSubFP=" + lastSubFP);
+        //}
+        return true;
       }
     }
   }
@@ -497,38 +462,16 @@ final class SegmentTermsEnumFrame {
       assert nextEnt < entCount;
       nextEnt++;
       final int code = suffixesReader.readVInt();
-      if (versionAutoPrefix == false) {
-        suffixesReader.skipBytes(code >>> 1);
-        if ((code & 1) != 0) {
-          final long subCode = suffixesReader.readVLong();
-          if (targetSubCode == subCode) {
-            //if (DEBUG) System.out.println("        match!");
-            lastSubFP = subFP;
-            return;
-          }
-        } else {
-          state.termBlockOrd++;
+      suffixesReader.skipBytes(code >>> 1);
+      if ((code & 1) != 0) {
+        final long subCode = suffixesReader.readVLong();
+        if (targetSubCode == subCode) {
+          //if (DEBUG) System.out.println("        match!");
+          lastSubFP = subFP;
+          return;
         }
       } else {
-        int flag = code & 3;
-        suffixesReader.skipBytes(code >>> 2);
-        //if (DEBUG) System.out.println("    " + nextEnt + " (of " + entCount + ") ent isSubBlock=" + ((code&1)==1));
-        if (flag == 1) {
-          // Sub-block
-          final long subCode = suffixesReader.readVLong();
-          //if (DEBUG) System.out.println("      subCode=" + subCode);
-          if (targetSubCode == subCode) {
-            //if (DEBUG) System.out.println("        match!");
-            lastSubFP = subFP;
-            return;
-          }
-        } else {
-          state.termBlockOrd++;
-          if (flag == 2 || flag == 3) {
-            // Floor'd prefix term
-            suffixesReader.readByte();
-          }
-        }
+        state.termBlockOrd++;
       }
     }
   }
@@ -691,11 +634,7 @@ final class SegmentTermsEnumFrame {
       nextEnt++;
 
       final int code = suffixesReader.readVInt();
-      if (versionAutoPrefix == false) {
-        suffix = code >>> 1;
-      } else {
-        suffix = code >>> 2;
-      }
+      suffix = code >>> 1;
 
       //if (DEBUG) {
       //  BytesRef suffixBytesRef = new BytesRef();
@@ -708,38 +647,13 @@ final class SegmentTermsEnumFrame {
       final int termLen = prefix + suffix;
       startBytePos = suffixesReader.getPosition();
       suffixesReader.skipBytes(suffix);
-      if (versionAutoPrefix == false) {
-        ste.termExists = (code & 1) == 0;
-        if (ste.termExists) {
-          state.termBlockOrd++;
-          subCode = 0;
-        } else {
-          subCode = suffixesReader.readVLong();
-          lastSubFP = fp - subCode;
-        }
+      ste.termExists = (code & 1) == 0;
+      if (ste.termExists) {
+        state.termBlockOrd++;
+        subCode = 0;
       } else {
-        switch (code & 3) {
-        case 0:
-          // Normal term
-          ste.termExists = true;
-          state.termBlockOrd++;
-          subCode = 0;
-          break;
-        case 1:
-          // Sub-block
-          ste.termExists = false;
-          subCode = suffixesReader.readVLong();
-          lastSubFP = fp - subCode;
-          break;
-        case 2:
-        case 3:
-          // Floor prefix term: skip it
-          //if (DEBUG) System.out.println("        skip floor prefix term");
-          suffixesReader.readByte();
-          ste.termExists = false;
-          state.termBlockOrd++;
-          continue;
-        }
+        subCode = suffixesReader.readVLong();
+        lastSubFP = fp - subCode;
       }
 
       final int targetLimit = target.offset + (target.length < termLen ? target.length : termLen);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c7fd1437/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
index f496928..62508f8 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
@@ -36,7 +36,6 @@ import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter
 import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.TYPE_BITS;
 import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.TYPE_MASK;
 import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.VERSION_CURRENT;
-import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.VERSION_CHUNK_STATS;
 import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.VERSION_START;
 
 import java.io.EOFException;
@@ -161,18 +160,14 @@ public final class CompressingStoredFieldsReader extends StoredFieldsReader {
       decompressor = compressionMode.newDecompressor();
       this.merging = false;
       this.state = new BlockState();
-      
-      if (version >= VERSION_CHUNK_STATS) {
-        fieldsStream.seek(maxPointer);
-        numChunks = fieldsStream.readVLong();
-        numDirtyChunks = fieldsStream.readVLong();
-        if (numDirtyChunks > numChunks) {
-          throw new CorruptIndexException("invalid chunk counts: dirty=" + numDirtyChunks + ", total=" + numChunks, fieldsStream);
-        }
-      } else {
-        numChunks = numDirtyChunks = -1;
+
+      fieldsStream.seek(maxPointer);
+      numChunks = fieldsStream.readVLong();
+      numDirtyChunks = fieldsStream.readVLong();
+      if (numDirtyChunks > numChunks) {
+        throw new CorruptIndexException("invalid chunk counts: dirty=" + numDirtyChunks + ", total=" + numChunks, fieldsStream);
       }
-      
+
       // NOTE: data file is too costly to verify checksum against all the bytes on open,
       // but for now we at least verify proper structure of the checksum footer: which looks
       // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption

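With VERSION_START raised to what used to be VERSION_CHUNK_STATS, every supported stored-fields file is guaranteed to end with the two vlong chunk counters at maxPointer, so the reader can read them unconditionally. For reference, a vlong stores seven payload bits per byte with the high bit marking continuation; a hedged, self-contained decoder sketch (not the Lucene DataInput implementation):

    // Sketch only: decode a variable-length long of the kind used for the
    // numChunks / numDirtyChunks statistics (7 data bits per byte, high bit
    // set while more bytes follow, least-significant group first).
    static long readVLong(java.io.DataInput in) throws java.io.IOException {
      long value = 0;
      int shift = 0;
      byte b;
      do {
        b = in.readByte();
        value |= (long) (b & 0x7F) << shift;
        shift += 7;
      } while ((b & 0x80) != 0);
      return value;
    }
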
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c7fd1437/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
index 5b42870..8cd8ccb 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
@@ -73,9 +73,8 @@ public final class CompressingStoredFieldsWriter extends StoredFieldsWriter {
 
   static final String CODEC_SFX_IDX = "Index";
   static final String CODEC_SFX_DAT = "Data";
-  static final int VERSION_START = 0;
-  static final int VERSION_CHUNK_STATS = 1;
-  static final int VERSION_CURRENT = VERSION_CHUNK_STATS;
+  static final int VERSION_START = 1;
+  static final int VERSION_CURRENT = VERSION_START;
 
   private final String segment;
   private CompressingStoredFieldsIndexWriter indexWriter;

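Raising VERSION_START so that it equals the former chunk-stats version means older files are rejected by the header check itself (CodecUtil.checkIndexHeader is handed [VERSION_START, VERSION_CURRENT] as the supported range) rather than by a fallback branch in the reader. A minimal sketch of the effect of narrowing that range, as a hypothetical helper:

    // Sketch only: versions outside [minVersion, maxVersion] are refused up
    // front, so no per-version branches are needed later.
    static int checkVersionInRange(int version, int minVersion, int maxVersion) throws java.io.IOException {
      if (version < minVersion) {
        throw new java.io.IOException("format version " + version
            + " is too old (oldest supported: " + minVersion + ")");
      }
      if (version > maxVersion) {
        throw new java.io.IOException("format version " + version
            + " is too new (newest supported: " + maxVersion + ")");
      }
      return version;
    }
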
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c7fd1437/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
index f0d1640..aa19f20 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
@@ -59,7 +59,6 @@ import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.
 import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.POSITIONS;
 import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_EXTENSION;
 import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_INDEX_EXTENSION;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CHUNK_STATS;
 import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CURRENT;
 import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_START;
 
@@ -148,18 +147,14 @@ public final class CompressingTermVectorsReader extends TermVectorsReader implem
       assert CodecUtil.indexHeaderLength(codecNameDat, segmentSuffix) == vectorsStream.getFilePointer();
       
       long pos = vectorsStream.getFilePointer();
-      
-      if (version >= VERSION_CHUNK_STATS) {
-        vectorsStream.seek(maxPointer);
-        numChunks = vectorsStream.readVLong();
-        numDirtyChunks = vectorsStream.readVLong();
-        if (numDirtyChunks > numChunks) {
-          throw new CorruptIndexException("invalid chunk counts: dirty=" + numDirtyChunks + ", total=" + numChunks, vectorsStream);
-        }
-      } else {
-        numChunks = numDirtyChunks = -1;
+
+      vectorsStream.seek(maxPointer);
+      numChunks = vectorsStream.readVLong();
+      numDirtyChunks = vectorsStream.readVLong();
+      if (numDirtyChunks > numChunks) {
+        throw new CorruptIndexException("invalid chunk counts: dirty=" + numDirtyChunks + ", total=" + numChunks, vectorsStream);
       }
-      
+
       // NOTE: data file is too costly to verify checksum against all the bytes on open,
       // but for now we at least verify proper structure of the checksum footer: which looks
       // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c7fd1437/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
index 9bd2483..26fe890 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
@@ -64,9 +64,8 @@ public final class CompressingTermVectorsWriter extends TermVectorsWriter {
   static final String CODEC_SFX_IDX = "Index";
   static final String CODEC_SFX_DAT = "Data";
 
-  static final int VERSION_START = 0;
-  static final int VERSION_CHUNK_STATS = 1;
-  static final int VERSION_CURRENT = VERSION_CHUNK_STATS;
+  static final int VERSION_START = 1;
+  static final int VERSION_CURRENT = VERSION_START;
 
   static final int PACKED_BLOCK_SIZE = 64;