Posted to commits@lucene.apache.org by mi...@apache.org on 2012/08/07 17:57:58 UTC

svn commit: r1370328 - in /lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs: block/ blockpacked/

Author: mikemccand
Date: Tue Aug  7 15:57:58 2012
New Revision: 1370328

URL: http://svn.apache.org/viewvc?rev=1370328&view=rev
Log:
carry back some code improvements from BlockPacked -> Block

Modified:
    lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsReader.java
    lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsWriter.java
    lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockSkipWriter.java
    lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedPostingsWriter.java
    lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedSkipWriter.java
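
[Editor's note] The diffs below replace each class's private copy of the block size ("final static int blockSize = BlockPostingsFormat.BLOCK_SIZE") with the shared BLOCK_SIZE constant, imported statically from BlockPackedPostingsFormat, so the reader, writer and skip writer all size their per-block buffers from one definition. A minimal, self-contained sketch of that pattern follows; the class names and the value 128 are illustrative only, not the actual Lucene source:

    final class ExamplePostingsFormat {
      // Single shared definition of the block size, analogous to
      // BlockPackedPostingsFormat.BLOCK_SIZE in the real code.
      static final int BLOCK_SIZE = 128;
      private ExamplePostingsFormat() {}
    }

    final class ExamplePostingsReader {
      // Per-block decode buffers are sized from the shared constant, so the
      // reader can never disagree with the writer about the block length.
      private final int[] docDeltaBuffer = new int[ExamplePostingsFormat.BLOCK_SIZE];
      private int docBufferUpto = ExamplePostingsFormat.BLOCK_SIZE; // forces an initial refill

      boolean needsRefill() {
        // Blocks are consumed one at a time; once the cursor reaches
        // BLOCK_SIZE the next block must be decoded from the doc stream.
        return docBufferUpto == ExamplePostingsFormat.BLOCK_SIZE;
      }
    }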

Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsReader.java?rev=1370328&r1=1370327&r2=1370328&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsReader.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsReader.java Tue Aug  7 15:57:58 2012
@@ -43,6 +43,7 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
+import static org.apache.lucene.codecs.blockpacked.BlockPackedPostingsFormat.BLOCK_SIZE;
 
 /**
  * Concrete class that reads docId(maybe frq,pos,offset,payloads) list
@@ -62,9 +63,6 @@ public final class BlockPostingsReader e
   // nocommit
   final String segment;
 
-  // NOTE: not private to avoid access$NNN methods:
-  final static int blockSize = BlockPostingsFormat.BLOCK_SIZE;
-
   public BlockPostingsReader(Directory dir, FieldInfos fieldInfos, SegmentInfo segmentInfo, IOContext ioContext, String segmentSuffix) throws IOException {
     boolean success = false;
     segment = segmentInfo.name;
@@ -116,8 +114,8 @@ public final class BlockPostingsReader e
                           BlockPostingsWriter.VERSION_START,
                           BlockPostingsWriter.VERSION_START);
     final int indexBlockSize = termsIn.readVInt();
-    if (indexBlockSize != blockSize) {
-      throw new IllegalStateException("index-time blockSize (" + indexBlockSize + ") != read-time blockSize (" + blockSize + ")");
+    if (indexBlockSize != BLOCK_SIZE) {
+      throw new IllegalStateException("index-time BLOCK_SIZE (" + indexBlockSize + ") != read-time BLOCK_SIZE (" + BLOCK_SIZE + ")");
     }
   }
 
@@ -235,12 +233,12 @@ public final class BlockPostingsReader e
       termState.docStartFP = in.readVLong();
       if (fieldHasPositions) {
         termState.posStartFP = in.readVLong();
-        if (termState.totalTermFreq > blockSize) {
+        if (termState.totalTermFreq > BLOCK_SIZE) {
           termState.lastPosBlockOffset = in.readVInt();
         } else {
           termState.lastPosBlockOffset = -1;
         }
-        if ((fieldHasPayloads || fieldHasOffsets) && termState.totalTermFreq >= blockSize) {
+        if ((fieldHasPayloads || fieldHasOffsets) && termState.totalTermFreq >= BLOCK_SIZE) {
           termState.payStartFP = in.readVLong();
         } else {
           termState.payStartFP = -1;
@@ -250,12 +248,12 @@ public final class BlockPostingsReader e
       termState.docStartFP += in.readVLong();
       if (fieldHasPositions) {
         termState.posStartFP += in.readVLong();
-        if (termState.totalTermFreq > blockSize) {
+        if (termState.totalTermFreq > BLOCK_SIZE) {
           termState.lastPosBlockOffset = in.readVInt();
         } else {
           termState.lastPosBlockOffset = -1;
         }
-        if ((fieldHasPayloads || fieldHasOffsets) && termState.totalTermFreq >= blockSize) {
+        if ((fieldHasPayloads || fieldHasOffsets) && termState.totalTermFreq >= BLOCK_SIZE) {
           long delta = in.readVLong();
           if (termState.payStartFP == -1) {
             termState.payStartFP = delta;
@@ -266,7 +264,7 @@ public final class BlockPostingsReader e
       }
     }
 
-    if (termState.docFreq > blockSize) {
+    if (termState.docFreq > BLOCK_SIZE) {
       termState.skipOffset = in.readVInt();
     } else {
       termState.skipOffset = -1;
@@ -327,8 +325,8 @@ public final class BlockPostingsReader e
     private final byte[] encoded;
     private final IntBuffer encodedBuffer;
     
-    private final int[] docDeltaBuffer = new int[blockSize];
-    private final int[] freqBuffer = new int[blockSize];
+    private final int[] docDeltaBuffer = new int[BLOCK_SIZE];
+    private final int[] freqBuffer = new int[BLOCK_SIZE];
 
     private int docBufferUpto;
 
@@ -366,7 +364,7 @@ public final class BlockPostingsReader e
       indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
       indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
       indexHasPayloads = fieldInfo.hasPayloads();
-      encoded = new byte[blockSize*4];
+      encoded = new byte[BLOCK_SIZE*4];
       encodedBuffer = ByteBuffer.wrap(encoded).asIntBuffer();      
     }
 
@@ -393,7 +391,7 @@ public final class BlockPostingsReader e
       }
       accum = 0;
       docUpto = 0;
-      docBufferUpto = blockSize;
+      docBufferUpto = BLOCK_SIZE;
       skipped = false;
       return this;
     }
@@ -413,7 +411,7 @@ public final class BlockPostingsReader e
       final int left = docFreq - docUpto;
       assert left > 0;
 
-      if (left >= blockSize) {
+      if (left >= BLOCK_SIZE) {
         if (DEBUG) {
           System.out.println("    fill doc block from fp=" + docIn.getFilePointer());
         }
@@ -449,7 +447,7 @@ public final class BlockPostingsReader e
           return doc = NO_MORE_DOCS;
         }
         //System.out.println("["+docFreq+"]"+" nextDoc");
-        if (docBufferUpto == blockSize) {
+        if (docBufferUpto == BLOCK_SIZE) {
           refillDocs();
         }
         if (DEBUG) {
@@ -480,7 +478,7 @@ public final class BlockPostingsReader e
 
       // nocommit use skipper!!!  it has next last doc id!!
 
-      if (docFreq > blockSize && target - accum > blockSize) {
+      if (docFreq > BLOCK_SIZE && target - accum > BLOCK_SIZE) {
 
         if (DEBUG) {
           System.out.println("load skipper");
@@ -490,7 +488,7 @@ public final class BlockPostingsReader e
           // Lazy init: first time this enum has ever been used for skipping
           skipper = new BlockSkipReader((IndexInput) docIn.clone(),
                                         BlockPostingsWriter.maxSkipLevels,
-                                        blockSize,
+                                        BLOCK_SIZE,
                                         indexHasPos,
                                         indexHasOffsets,
                                         indexHasPayloads);
@@ -511,11 +509,11 @@ public final class BlockPostingsReader e
           if (DEBUG) {
             System.out.println("skipper moved to docUpto=" + newDocUpto + " vs current=" + docUpto + "; docID=" + skipper.getDoc() + " fp=" + skipper.getDocPointer());
           }
-          assert newDocUpto % blockSize == (blockSize-1): "got " + newDocUpto;
+          assert newDocUpto % BLOCK_SIZE == (BLOCK_SIZE-1): "got " + newDocUpto;
           docUpto = newDocUpto+1;
 
           // Force to read next block
-          docBufferUpto = blockSize;
+          docBufferUpto = BLOCK_SIZE;
           accum = skipper.getDoc();               // actually, this is just lastSkipEntry
           docIn.seek(skipper.getDocPointer());    // now point to the block we want to search
         }
@@ -536,7 +534,7 @@ public final class BlockPostingsReader e
         // containing the doc?  yet assert false trips ... i
         // think because if you advance w/o having done a
         // nextDoc yet()... can we assert/remove this?
-        if (docBufferUpto == blockSize) {
+        if (docBufferUpto == BLOCK_SIZE) {
           refillDocs();
         }
         accum += docDeltaBuffer[docBufferUpto];
@@ -571,9 +569,9 @@ public final class BlockPostingsReader e
     private final byte[] encoded;
     private final IntBuffer encodedBuffer;
 
-    private final int[] docDeltaBuffer = new int[blockSize];
-    private final int[] freqBuffer = new int[blockSize];
-    private final int[] posDeltaBuffer = new int[blockSize];
+    private final int[] docDeltaBuffer = new int[BLOCK_SIZE];
+    private final int[] freqBuffer = new int[BLOCK_SIZE];
+    private final int[] posDeltaBuffer = new int[BLOCK_SIZE];
 
     private int docBufferUpto;
     private int posBufferUpto;
@@ -630,7 +628,7 @@ public final class BlockPostingsReader e
       this.startDocIn = BlockPostingsReader.this.docIn;
       this.docIn = (IndexInput) startDocIn.clone();
       this.posIn = (IndexInput) BlockPostingsReader.this.posIn.clone();
-      encoded = new byte[blockSize*4];
+      encoded = new byte[BLOCK_SIZE*4];
       encodedBuffer = ByteBuffer.wrap(encoded).asIntBuffer();
       indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
       indexHasPayloads = fieldInfo.hasPayloads();
@@ -655,9 +653,9 @@ public final class BlockPostingsReader e
       skipOffset = termState.skipOffset;
       posPendingFP = posTermStartFP;
       posPendingCount = 0;
-      if (termState.totalTermFreq < blockSize) {
+      if (termState.totalTermFreq < BLOCK_SIZE) {
         lastPosBlockFP = posTermStartFP;
-      } else if (termState.totalTermFreq == blockSize) {
+      } else if (termState.totalTermFreq == BLOCK_SIZE) {
         lastPosBlockFP = -1;
       } else {
         lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
@@ -666,7 +664,7 @@ public final class BlockPostingsReader e
       doc = -1;
       accum = 0;
       docUpto = 0;
-      docBufferUpto = blockSize;
+      docBufferUpto = BLOCK_SIZE;
       skipped = false;
       return this;
     }
@@ -685,7 +683,7 @@ public final class BlockPostingsReader e
       //System.out.println("["+docFreq+"]"+" refillDoc");
       final int left = docFreq - docUpto;
       assert left > 0;
-      if (left >= blockSize) {
+      if (left >= BLOCK_SIZE) {
         if (DEBUG) {
           System.out.println("    fill doc block from fp=" + docIn.getFilePointer());
         }
@@ -752,7 +750,7 @@ public final class BlockPostingsReader e
           return doc = NO_MORE_DOCS;
         }
         //System.out.println("["+docFreq+"]"+" nextDoc");
-        if (docBufferUpto == blockSize) {
+        if (docBufferUpto == BLOCK_SIZE) {
           refillDocs();
         }
         if (DEBUG) {
@@ -788,8 +786,8 @@ public final class BlockPostingsReader e
       // nocommit 2 is heuristic guess!!
       // nocommit put cheating back!  does it help?
       // nocommit use skipper!!!  it has next last doc id!!
-      //if (docFreq > blockSize && target - (blockSize - docBufferUpto) - 2*blockSize > accum) {
-      if (docFreq > blockSize && target - accum > blockSize) {
+      //if (docFreq > BLOCK_SIZE && target - (BLOCK_SIZE - docBufferUpto) - 2*BLOCK_SIZE > accum) {
+      if (docFreq > BLOCK_SIZE && target - accum > BLOCK_SIZE) {
         if (DEBUG) {
           System.out.println("    try skipper");
         }
@@ -800,7 +798,7 @@ public final class BlockPostingsReader e
           }
           skipper = new BlockSkipReader((IndexInput) docIn.clone(),
                                         BlockPostingsWriter.maxSkipLevels,
-                                        blockSize,
+                                        BLOCK_SIZE,
                                         true,
                                         indexHasOffsets,
                                         indexHasPayloads);
@@ -825,11 +823,11 @@ public final class BlockPostingsReader e
             System.out.println("    skipper moved to docUpto=" + newDocUpto + " vs current=" + docUpto + "; docID=" + skipper.getDoc() + " fp=" + skipper.getDocPointer() + " pos.fp=" + skipper.getPosPointer() + " pos.bufferUpto=" + skipper.getPosBufferUpto());
           }
 
-          assert newDocUpto % blockSize == (blockSize-1): "got " + newDocUpto;
+          assert newDocUpto % BLOCK_SIZE == (BLOCK_SIZE-1): "got " + newDocUpto;
           docUpto = newDocUpto+1;
 
           // Force to read next block
-          docBufferUpto = blockSize;
+          docBufferUpto = BLOCK_SIZE;
           accum = skipper.getDoc();
           docIn.seek(skipper.getDocPointer());
           posPendingFP = skipper.getPosPointer();
@@ -851,7 +849,7 @@ public final class BlockPostingsReader e
         // containing the doc?  yet assert false trips ... i
         // think because if you advance w/o having done a
         // nextDoc yet()... can we assert/remove this?
-        if (docBufferUpto == blockSize) {
+        if (docBufferUpto == BLOCK_SIZE) {
           // nocommit hmm skip freq?  but: we don't ever
           // scan over more than one block?
           refillDocs();
@@ -892,7 +890,7 @@ public final class BlockPostingsReader e
         System.out.println("      FPR.skipPositions: toSkip=" + toSkip);
       }
 
-      final int leftInBlock = blockSize - posBufferUpto;
+      final int leftInBlock = BLOCK_SIZE - posBufferUpto;
       if (toSkip < leftInBlock) {
         posBufferUpto += toSkip;
         if (DEBUG) {
@@ -900,13 +898,13 @@ public final class BlockPostingsReader e
         }
       } else {
         toSkip -= leftInBlock;
-        while(toSkip >= blockSize) {
+        while(toSkip >= BLOCK_SIZE) {
           if (DEBUG) {
             System.out.println("        skip whole block @ fp=" + posIn.getFilePointer());
           }
           assert posIn.getFilePointer() != lastPosBlockFP;
           skipBlock(posIn);
-          toSkip -= blockSize;
+          toSkip -= BLOCK_SIZE;
         }
         refillPositions();
         posBufferUpto = toSkip;
@@ -931,7 +929,7 @@ public final class BlockPostingsReader e
         posPendingFP = -1;
 
         // Force buffer refill:
-        posBufferUpto = blockSize;
+        posBufferUpto = BLOCK_SIZE;
       }
 
       if (posPendingCount > freq) {
@@ -939,7 +937,7 @@ public final class BlockPostingsReader e
         posPendingCount = freq;
       }
 
-      if (posBufferUpto == blockSize) {
+      if (posBufferUpto == BLOCK_SIZE) {
         refillPositions();
         posBufferUpto = 0;
       }
@@ -978,9 +976,9 @@ public final class BlockPostingsReader e
     private final byte[] encoded;
     private final IntBuffer encodedBuffer;
 
-    private final int[] docDeltaBuffer = new int[blockSize];
-    private final int[] freqBuffer = new int[blockSize];
-    private final int[] posDeltaBuffer = new int[blockSize];
+    private final int[] docDeltaBuffer = new int[BLOCK_SIZE];
+    private final int[] freqBuffer = new int[BLOCK_SIZE];
+    private final int[] posDeltaBuffer = new int[BLOCK_SIZE];
 
     private final int[] payloadLengthBuffer;
     private final int[] offsetStartDeltaBuffer;
@@ -1056,12 +1054,12 @@ public final class BlockPostingsReader e
       this.docIn = (IndexInput) startDocIn.clone();
       this.posIn = (IndexInput) BlockPostingsReader.this.posIn.clone();
       this.payIn = (IndexInput) BlockPostingsReader.this.payIn.clone();
-      encoded = new byte[blockSize*4];
+      encoded = new byte[BLOCK_SIZE*4];
       encodedBuffer = ByteBuffer.wrap(encoded).asIntBuffer();
       indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
       if (indexHasOffsets) {
-        offsetStartDeltaBuffer = new int[blockSize];
-        offsetLengthBuffer = new int[blockSize];
+        offsetStartDeltaBuffer = new int[BLOCK_SIZE];
+        offsetLengthBuffer = new int[BLOCK_SIZE];
       } else {
         offsetStartDeltaBuffer = null;
         offsetLengthBuffer = null;
@@ -1071,7 +1069,7 @@ public final class BlockPostingsReader e
 
       indexHasPayloads = fieldInfo.hasPayloads();
       if (indexHasPayloads) {
-        payloadLengthBuffer = new int[blockSize];
+        payloadLengthBuffer = new int[BLOCK_SIZE];
         payloadBytes = new byte[128];
         payload = new BytesRef();
       } else {
@@ -1101,9 +1099,9 @@ public final class BlockPostingsReader e
       posPendingFP = posTermStartFP;
       payPendingFP = payTermStartFP;
       posPendingCount = 0;
-      if (termState.totalTermFreq < blockSize) {
+      if (termState.totalTermFreq < BLOCK_SIZE) {
         lastPosBlockFP = posTermStartFP;
-      } else if (termState.totalTermFreq == blockSize) {
+      } else if (termState.totalTermFreq == BLOCK_SIZE) {
         lastPosBlockFP = -1;
       } else {
         lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
@@ -1112,7 +1110,7 @@ public final class BlockPostingsReader e
       doc = -1;
       accum = 0;
       docUpto = 0;
-      docBufferUpto = blockSize;
+      docBufferUpto = BLOCK_SIZE;
       skipped = false;
       return this;
     }
@@ -1132,7 +1130,7 @@ public final class BlockPostingsReader e
       final int left = docFreq - docUpto;
       assert left > 0;
 
-      if (left >= blockSize) {
+      if (left >= BLOCK_SIZE) {
         if (DEBUG) {
           System.out.println("    fill doc block from fp=" + docIn.getFilePointer());
         }
@@ -1245,7 +1243,7 @@ public final class BlockPostingsReader e
           return doc = NO_MORE_DOCS;
         }
         //System.out.println("["+docFreq+"]"+" nextDoc");
-        if (docBufferUpto == blockSize) {
+        if (docBufferUpto == BLOCK_SIZE) {
           refillDocs();
         }
         if (DEBUG) {
@@ -1283,8 +1281,8 @@ public final class BlockPostingsReader e
       // nocommit 2 is heuristic guess!!
       // nocommit put cheating back!  does it help?
       // nocommit use skipper!!!  it has next last doc id!!
-      //if (docFreq > blockSize && target - (blockSize - docBufferUpto) - 2*blockSize > accum) {
-      if (docFreq > blockSize && target - accum > blockSize) {
+      //if (docFreq > BLOCK_SIZE && target - (BLOCK_SIZE - docBufferUpto) - 2*BLOCK_SIZE > accum) {
+      if (docFreq > BLOCK_SIZE && target - accum > BLOCK_SIZE) {
 
         if (DEBUG) {
           System.out.println("    try skipper");
@@ -1297,7 +1295,7 @@ public final class BlockPostingsReader e
           }
           skipper = new BlockSkipReader((IndexInput) docIn.clone(),
                                         BlockPostingsWriter.maxSkipLevels,
-                                        blockSize,
+                                        BLOCK_SIZE,
                                         true,
                                         indexHasOffsets,
                                         indexHasPayloads);
@@ -1321,11 +1319,11 @@ public final class BlockPostingsReader e
           if (DEBUG) {
             System.out.println("    skipper moved to docUpto=" + newDocUpto + " vs current=" + docUpto + "; docID=" + skipper.getDoc() + " fp=" + skipper.getDocPointer() + " pos.fp=" + skipper.getPosPointer() + " pos.bufferUpto=" + skipper.getPosBufferUpto() + " pay.fp=" + skipper.getPayPointer() + " lastStartOffset=" + lastStartOffset);
           }
-          assert newDocUpto % blockSize == (blockSize-1): "got " + newDocUpto;
+          assert newDocUpto % BLOCK_SIZE == (BLOCK_SIZE-1): "got " + newDocUpto;
           docUpto = newDocUpto+1;
 
           // Force to read next block
-          docBufferUpto = blockSize;
+          docBufferUpto = BLOCK_SIZE;
           accum = skipper.getDoc();
           docIn.seek(skipper.getDocPointer());
           posPendingFP = skipper.getPosPointer();
@@ -1366,7 +1364,7 @@ public final class BlockPostingsReader e
         System.out.println("      FPR.skipPositions: toSkip=" + toSkip);
       }
 
-      final int leftInBlock = blockSize - posBufferUpto;
+      final int leftInBlock = BLOCK_SIZE - posBufferUpto;
       if (toSkip < leftInBlock) {
         int end = posBufferUpto + toSkip;
         while(posBufferUpto < end) {
@@ -1383,7 +1381,7 @@ public final class BlockPostingsReader e
         }
       } else {
         toSkip -= leftInBlock;
-        while(toSkip >= blockSize) {
+        while(toSkip >= BLOCK_SIZE) {
           if (DEBUG) {
             System.out.println("        skip whole block @ fp=" + posIn.getFilePointer());
           }
@@ -1404,11 +1402,11 @@ public final class BlockPostingsReader e
             // up into lastStartOffset:
             readBlock(payIn, encoded, encodedBuffer, offsetStartDeltaBuffer);
             readBlock(payIn, encoded, encodedBuffer, offsetLengthBuffer);
-            for(int i=0;i<blockSize;i++) {
+            for(int i=0;i<BLOCK_SIZE;i++) {
               lastStartOffset += offsetStartDeltaBuffer[i] + offsetLengthBuffer[i];
             }
           }
-          toSkip -= blockSize;
+          toSkip -= BLOCK_SIZE;
         }
         refillPositions();
         payloadByteUpto = 0;
@@ -1455,7 +1453,7 @@ public final class BlockPostingsReader e
         }
 
         // Force buffer refill:
-        posBufferUpto = blockSize;
+        posBufferUpto = BLOCK_SIZE;
       }
 
       if (indexHasPayloads) {
@@ -1473,7 +1471,7 @@ public final class BlockPostingsReader e
         posPendingCount = freq;
       }
 
-      if (posBufferUpto == blockSize) {
+      if (posBufferUpto == BLOCK_SIZE) {
         refillPositions();
         posBufferUpto = 0;
       }

Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsWriter.java?rev=1370328&r1=1370327&r2=1370328&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsWriter.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockPostingsWriter.java Tue Aug  7 15:57:58 2012
@@ -37,6 +37,8 @@ import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
+import static org.apache.lucene.codecs.blockpacked.BlockPackedPostingsFormat.BLOCK_SIZE;
+
 /**
  * Concrete class that writes docId(maybe frq,pos,offset,payloads) list
  * with postings format.
@@ -67,8 +69,6 @@ public final class BlockPostingsWriter e
   final IndexOutput posOut;
   final IndexOutput payOut;
 
-  final static int blockSize = BlockPostingsFormat.BLOCK_SIZE;
-
   private IndexOutput termsOut;
 
   // How current field indexes postings:
@@ -123,22 +123,22 @@ public final class BlockPostingsWriter e
     try {
       CodecUtil.writeHeader(docOut, DOC_CODEC, VERSION_CURRENT);
       if (state.fieldInfos.hasProx()) {
-        posDeltaBuffer = new int[blockSize];
+        posDeltaBuffer = new int[BLOCK_SIZE];
         posOut = state.directory.createOutput(IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, BlockPostingsFormat.POS_EXTENSION),
                                               state.context);
         CodecUtil.writeHeader(posOut, POS_CODEC, VERSION_CURRENT);
 
         if (state.fieldInfos.hasPayloads()) {
           payloadBytes = new byte[128];
-          payloadLengthBuffer = new int[blockSize];
+          payloadLengthBuffer = new int[BLOCK_SIZE];
         } else {
           payloadBytes = null;
           payloadLengthBuffer = null;
         }
 
         if (state.fieldInfos.hasOffsets()) {
-          offsetStartDeltaBuffer = new int[blockSize];
-          offsetLengthBuffer = new int[blockSize];
+          offsetStartDeltaBuffer = new int[BLOCK_SIZE];
+          offsetLengthBuffer = new int[BLOCK_SIZE];
         } else {
           offsetStartDeltaBuffer = null;
           offsetLengthBuffer = null;
@@ -165,17 +165,17 @@ public final class BlockPostingsWriter e
       }
     }
 
-    docDeltaBuffer = new int[blockSize];
-    freqBuffer = new int[blockSize];
+    docDeltaBuffer = new int[BLOCK_SIZE];
+    freqBuffer = new int[BLOCK_SIZE];
 
     skipWriter = new BlockSkipWriter(maxSkipLevels, 
-                                     blockSize,
+                                     BLOCK_SIZE,
                                      state.segmentInfo.getDocCount(),
                                      docOut,
                                      posOut,
                                      payOut);
 
-    encoded = new byte[blockSize*4];
+    encoded = new byte[BLOCK_SIZE*4];
     encodedBuffer = ByteBuffer.wrap(encoded).asIntBuffer();
   }
 
@@ -183,7 +183,7 @@ public final class BlockPostingsWriter e
   public void start(IndexOutput termsOut) throws IOException {
     this.termsOut = termsOut;
     CodecUtil.writeHeader(termsOut, TERMS_CODEC, VERSION_CURRENT);
-    termsOut.writeVInt(blockSize);
+    termsOut.writeVInt(BLOCK_SIZE);
   }
 
   @Override
@@ -240,7 +240,7 @@ public final class BlockPostingsWriter e
     docBufferUpto++;
     docCount++;
 
-    if (docBufferUpto == blockSize) {
+    if (docBufferUpto == BLOCK_SIZE) {
       if (DEBUG) {
         System.out.println("  write docDelta block @ fp=" + docOut.getFilePointer());
       }
@@ -291,7 +291,7 @@ public final class BlockPostingsWriter e
     
     posBufferUpto++;
     lastPosition = position;
-    if (posBufferUpto == blockSize) {
+    if (posBufferUpto == BLOCK_SIZE) {
       if (DEBUG) {
         System.out.println("  write pos bulk block @ fp=" + posOut.getFilePointer());
       }
@@ -329,7 +329,7 @@ public final class BlockPostingsWriter e
     // Since we don't know df for current term, we had to buffer
     // those skip data for each block, and when a new doc comes, 
     // write them to skip file.
-    if (docBufferUpto == blockSize) {
+    if (docBufferUpto == BLOCK_SIZE) {
       lastBlockDocID = lastDocID;
       if (posOut != null) {
         if (payOut != null) {
@@ -408,7 +408,7 @@ public final class BlockPostingsWriter e
       }
 
       assert stats.totalTermFreq != -1;
-      if (stats.totalTermFreq > blockSize) {
+      if (stats.totalTermFreq > BLOCK_SIZE) {
         lastPosBlockOffset = (int) (posOut.getFilePointer() - posTermStartFP);
       } else {
         lastPosBlockOffset = -1;
@@ -418,7 +418,7 @@ public final class BlockPostingsWriter e
         
         // nocommit should we send offsets/payloads to
         // .pay...?  seems wasteful (have to store extra
-        // vLong for low (< blockSize) DF terms = vast vast
+        // vLong for low (< BLOCK_SIZE) DF terms = vast vast
         // majority)
 
         // vInt encode the remaining positions/payloads/offsets:
@@ -473,7 +473,7 @@ public final class BlockPostingsWriter e
     }
 
     int skipOffset;
-    if (docCount > blockSize) {
+    if (docCount > BLOCK_SIZE) {
       skipOffset = (int) (skipWriter.writeSkip(docOut)-docTermStartFP);
       
       if (DEBUG) {
@@ -487,7 +487,7 @@ public final class BlockPostingsWriter e
     }
 
     long payStartFP;
-    if (stats.totalTermFreq >= blockSize) {
+    if (stats.totalTermFreq >= BLOCK_SIZE) {
       payStartFP = payTermStartFP;
     } else {
       payStartFP = -1;

Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockSkipWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockSkipWriter.java?rev=1370328&r1=1370327&r2=1370328&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockSkipWriter.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/block/BlockSkipWriter.java Tue Aug  7 15:57:58 2012
@@ -69,8 +69,6 @@ final class BlockSkipWriter extends Mult
   private boolean fieldHasPayloads;
 
   public BlockSkipWriter(int maxSkipLevels, int blockSize, int docCount, IndexOutput docOut, IndexOutput posOut, IndexOutput payOut) {
-    // nocommit figure out what skipMultiplier is best (4 is
-    // total guess):
     super(blockSize, 8, maxSkipLevels, docCount);
     this.docOut = docOut;
     this.posOut = posOut;

Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedPostingsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedPostingsWriter.java?rev=1370328&r1=1370327&r2=1370328&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedPostingsWriter.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedPostingsWriter.java Tue Aug  7 15:57:58 2012
@@ -411,7 +411,7 @@ public final class BlockPackedPostingsWr
         
         // nocommit should we send offsets/payloads to
         // .pay...?  seems wasteful (have to store extra
-        // vLong for low (< blockSize) DF terms = vast vast
+        // vLong for low (< BLOCK_SIZE) DF terms = vast vast
         // majority)
 
         // vInt encode the remaining positions/payloads/offsets:

Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedSkipWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedSkipWriter.java?rev=1370328&r1=1370327&r2=1370328&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedSkipWriter.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/codecs/blockpacked/BlockPackedSkipWriter.java Tue Aug  7 15:57:58 2012
@@ -24,25 +24,25 @@ import org.apache.lucene.store.IndexOutp
 import org.apache.lucene.codecs.MultiLevelSkipListWriter;
 
 /**
-* Write skip lists with multiple levels, and support skip within block ints.
-*
-* Assume that docFreq = 28, skipInterval = blockSize = 12
-*
-*  |       block#0       | |      block#1        | |vInts|
-*  d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
-*                          ^                       ^       (level 0 skip point)
-*
-* Note that skipWriter will ignore first document in block#0, since 
-* it is useless as a skip point.  Also, we'll never skip into the vInts
-* block, only record skip data at the start its start point(if it exist).
-*
-* For each skip point, we will record: 
-* 1. lastDocID, 
-* 2. its related file points(position, payload), 
-* 3. related numbers or uptos(position, payload).
-* 4. start offset.
-*
-*/
+ * Write skip lists with multiple levels, and support skip within block ints.
+ *
+ * Assume that docFreq = 28, skipInterval = blockSize = 12
+ *
+ *  |       block#0       | |      block#1        | |vInts|
+ *  d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
+ *                          ^                       ^       (level 0 skip point)
+ *
+ * Note that skipWriter will ignore first document in block#0, since 
+ * it is useless as a skip point.  Also, we'll never skip into the vInts
+ * block, only record skip data at the start its start point(if it exist).
+ *
+ * For each skip point, we will record: 
+ * 1. lastDocID, 
+ * 2. its related file points(position, payload), 
+ * 3. related numbers or uptos(position, payload).
+ * 4. start offset.
+ *
+ */
 final class BlockPackedSkipWriter extends MultiLevelSkipListWriter {
   private boolean DEBUG = BlockPackedPostingsReader.DEBUG;
   
@@ -69,8 +69,6 @@ final class BlockPackedSkipWriter extend
   private boolean fieldHasPayloads;
 
   public BlockPackedSkipWriter(int maxSkipLevels, int blockSize, int docCount, IndexOutput docOut, IndexOutput posOut, IndexOutput payOut) {
-    // nocommit figure out what skipMultiplier is best (4 is
-    // total guess):
     super(blockSize, 8, maxSkipLevels, docCount);
     this.docOut = docOut;
     this.posOut = posOut;
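
[Editor's note] For readers following the skip-list javadoc reformatted above in BlockPackedSkipWriter: with docFreq = 28 and skipInterval = blockSize = 12, level-0 skip entries are recorded only after each full block, and the trailing 4 docs are vInt-encoded with no skip entry. The toy program below is not part of the commit; it just computes those skip points, and its output is consistent with the reader-side assertion newDocUpto % BLOCK_SIZE == (BLOCK_SIZE-1) seen in BlockPostingsReader above.

    public class SkipPointDemo {
      public static void main(String[] args) {
        final int docFreq = 28;
        final int blockSize = 12;   // also the level-0 skip interval in the javadoc example
        final int fullBlocks = docFreq / blockSize;
        for (int block = 0; block < fullBlocks; block++) {
          // Zero-based ordinal of the last doc in each full block: this is where
          // a level-0 skip entry (lastDocID, doc/pos/pay file pointers, uptos,
          // start offset) is recorded.
          int lastDocUpto = (block + 1) * blockSize - 1;
          System.out.println("level-0 skip point at docUpto=" + lastDocUpto);
        }
        System.out.println((docFreq % blockSize) + " trailing docs are vInt-encoded, no skip entry");
      }
    }

Running it prints skip points at docUpto=11 and docUpto=23, with 4 trailing vInt-encoded docs, matching the two level-0 markers in the javadoc diagram.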