Posted to commits@lucene.apache.org by si...@apache.org on 2011/01/05 21:25:44 UTC

svn commit: r1055622 [3/14] - in /lucene/dev/branches/docvalues: ./ dev-tools/ dev-tools/eclipse/ dev-tools/idea/ dev-tools/idea/.idea/ dev-tools/idea/.idea/libraries/ dev-tools/idea/lucene/ dev-tools/idea/lucene/contrib/ dev-tools/idea/lucene/contrib/...

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java Wed Jan  5 20:25:17 2011
@@ -65,7 +65,6 @@ public class ConcurrentMergeScheduler ex
 
   protected Directory dir;
 
-  private boolean closed;
   protected IndexWriter writer;
   protected int mergeThreadCount;
 
@@ -147,18 +146,37 @@ public class ConcurrentMergeScheduler ex
    *  pause & unpause threads. */
   protected synchronized void updateMergeThreads() {
 
-    CollectionUtil.mergeSort(mergeThreads, compareByMergeDocCount);
+    // Only look at threads that are alive & not in the
+    // process of stopping (ie have an active merge):
+    final List<MergeThread> activeMerges = new ArrayList<MergeThread>();
+
+    int threadIdx = 0;
+    while (threadIdx < mergeThreads.size()) {
+      final MergeThread mergeThread = mergeThreads.get(threadIdx);
+      if (!mergeThread.isAlive()) {
+        // Prune any dead threads
+        mergeThreads.remove(threadIdx);
+        continue;
+      }
+      if (mergeThread.getCurrentMerge() != null) {
+        activeMerges.add(mergeThread);
+      }
+      threadIdx++;
+    }
+
+    CollectionUtil.mergeSort(activeMerges, compareByMergeDocCount);
     
-    final int count = mergeThreads.size();
     int pri = mergeThreadPriority;
-    for(int i=0;i<count;i++) {
-      final MergeThread mergeThread = mergeThreads.get(i);
+    final int activeMergeCount = activeMerges.size();
+    for (threadIdx=0;threadIdx<activeMergeCount;threadIdx++) {
+      final MergeThread mergeThread = activeMerges.get(threadIdx);
       final MergePolicy.OneMerge merge = mergeThread.getCurrentMerge();
-      if (merge == null) {
+      if (merge == null) { 
         continue;
       }
+
       final boolean doPause;
-      if (i < count-maxThreadCount) {
+      if (threadIdx < activeMergeCount-maxThreadCount) {
         doPause = true;
       } else {
         doPause = false;
@@ -208,23 +226,29 @@ public class ConcurrentMergeScheduler ex
 
   @Override
   public void close() {
-    closed = true;
+    sync();
   }
 
-  public synchronized void sync() {
-    while(mergeThreadCount() > 0) {
-      if (verbose())
-        message("now wait for threads; currently " + mergeThreads.size() + " still running");
-      final int count = mergeThreads.size();
-      if (verbose()) {
-        for(int i=0;i<count;i++)
-          message("    " + i + ": " + mergeThreads.get(i));
+  /** Wait for any running merge threads to finish */
+  public void sync() {
+    while(true) {
+      MergeThread toSync = null;
+      synchronized(this) {
+        for(MergeThread t : mergeThreads) {
+          if (t.isAlive()) {
+            toSync = t;
+            break;
+          }
+        }
       }
-      
-      try {
-        wait();
-      } catch (InterruptedException ie) {
-        throw new ThreadInterruptedException(ie);
+      if (toSync != null) {
+        try {
+          toSync.join();
+        } catch (InterruptedException ie) {
+          throw new ThreadInterruptedException(ie);
+        }
+      } else {
+        break;
       }
     }
   }
@@ -232,9 +256,12 @@ public class ConcurrentMergeScheduler ex
   private synchronized int mergeThreadCount() {
     int count = 0;
     final int numThreads = mergeThreads.size();
-    for(int i=0;i<numThreads;i++)
-      if (mergeThreads.get(i).isAlive())
+    for(int i=0;i<numThreads;i++) {
+      final MergeThread t = mergeThreads.get(i);
+      if (t.isAlive() && t.getCurrentMerge() != null) {
         count++;
+      }
+    }
     return count;
   }
 
@@ -311,11 +338,17 @@ public class ConcurrentMergeScheduler ex
           // merge:
           merger = getMergeThread(writer, merge);
           mergeThreads.add(merger);
-          updateMergeThreads();
-          if (verbose())
+          if (verbose()) {
             message("    launch new thread [" + merger.getName() + "]");
+          }
 
           merger.start();
+
+          // Must call this after starting the thread else
+          // the new thread is removed from mergeThreads
+          // (since it's not alive yet):
+          updateMergeThreads();
+
           success = true;
         }
       } finally {
@@ -408,8 +441,6 @@ public class ConcurrentMergeScheduler ex
             if (verbose())
               message("  merge thread: do another merge " + merge.segString(dir));
           } else {
-            done = true;
-            updateMergeThreads();
             break;
           }
         }
@@ -428,11 +459,10 @@ public class ConcurrentMergeScheduler ex
           }
         }
       } finally {
+        done = true;
         synchronized(ConcurrentMergeScheduler.this) {
-          ConcurrentMergeScheduler.this.notifyAll();
-          boolean removed = mergeThreads.remove(this);
-          assert removed;
           updateMergeThreads();
+          ConcurrentMergeScheduler.this.notifyAll();
         }
       }
     }
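
For context on the close()/sync() change above: close() now simply delegates to the join-based sync(). A minimal usage sketch, assuming a Directory dir and an Analyzer analyzer are already set up (Version constant assumed):

    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, analyzer)
        .setMergeScheduler(cms);
    IndexWriter writer = new IndexWriter(dir, conf);
    // ... add documents; background merge threads may be running ...
    cms.sync(); // join()s each live merge thread in turn until none remain,
                // replacing the old wait()/notifyAll() handshake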

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DirectoryReader.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DirectoryReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DirectoryReader.java Wed Jan  5 20:25:17 2011
@@ -20,7 +20,6 @@ package org.apache.lucene.index;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -31,7 +30,6 @@ import java.util.Set;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.search.Similarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
@@ -64,7 +62,6 @@ class DirectoryReader extends IndexReade
   private SegmentReader[] subReaders;
   private int[] starts;                           // 1st docno for each segment
   private final Map<SegmentReader,ReaderUtil.Slice> subReaderToSlice = new HashMap<SegmentReader,ReaderUtil.Slice>();
-  private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
   private int maxDoc = 0;
   private int numDocs = -1;
   private boolean hasDeletions = false;
@@ -186,7 +183,7 @@ class DirectoryReader extends IndexReade
 
   /** This constructor is only used for {@link #reopen()} */
   DirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts,
-                  Map<String,byte[]> oldNormsCache, boolean readOnly, boolean doClone, int termInfosIndexDivisor, CodecProvider codecs) throws IOException {
+                  boolean readOnly, boolean doClone, int termInfosIndexDivisor, CodecProvider codecs) throws IOException {
     this.directory = directory;
     this.readOnly = readOnly;
     this.segmentInfos = infos;
@@ -274,38 +271,6 @@ class DirectoryReader extends IndexReade
     
     // initialize the readers to calculate maxDoc before we try to reuse the old normsCache
     initialize(newReaders);
-    
-    // try to copy unchanged norms from the old normsCache to the new one
-    if (oldNormsCache != null) {
-      for (Map.Entry<String,byte[]> entry: oldNormsCache.entrySet()) {
-        String field = entry.getKey();
-        if (!hasNorms(field)) {
-          continue;
-        }
-
-        byte[] oldBytes = entry.getValue();
-
-        byte[] bytes = new byte[maxDoc()];
-
-        for (int i = 0; i < subReaders.length; i++) {
-          Integer oldReaderIndex = segmentReaders.get(subReaders[i].getSegmentName());
-
-          // this SegmentReader was not re-opened, we can copy all of its norms 
-          if (oldReaderIndex != null &&
-               (oldReaders[oldReaderIndex.intValue()] == subReaders[i] 
-                 || oldReaders[oldReaderIndex.intValue()].norms.get(field) == subReaders[i].norms.get(field))) {
-            // we don't have to synchronize here: either this constructor is called from a SegmentReader,
-            // in which case no old norms cache is present, or it is called from MultiReader.reopen(),
-            // which is synchronized
-            System.arraycopy(oldBytes, oldStarts[oldReaderIndex.intValue()], bytes, starts[i], starts[i+1] - starts[i]);
-          } else {
-            subReaders[i].norms(field, bytes, starts[i]);
-          }
-        }
-
-        normsCache.put(field, bytes);      // update cache
-      }
-    }
   }
 
   /** {@inheritDoc} */
@@ -497,7 +462,7 @@ class DirectoryReader extends IndexReade
 
   private synchronized DirectoryReader doReopen(SegmentInfos infos, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
     DirectoryReader reader;
-    reader = new DirectoryReader(directory, infos, subReaders, starts, normsCache, openReadOnly, doClone, termInfosIndexDivisor, codecs);
+    reader = new DirectoryReader(directory, infos, subReaders, starts, openReadOnly, doClone, termInfosIndexDivisor, codecs);
     return reader;
   }
 
@@ -637,41 +602,18 @@ class DirectoryReader extends IndexReade
   @Override
   public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
-    byte[] bytes = normsCache.get(field);
-    if (bytes != null)
-      return bytes;          // cache hit
-    if (!hasNorms(field))
-      return null;
-
-    bytes = new byte[maxDoc()];
-    for (int i = 0; i < subReaders.length; i++)
-      subReaders[i].norms(field, bytes, starts[i]);
-    normsCache.put(field, bytes);      // update cache
-    return bytes;
+    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
   }
 
   @Override
   public synchronized void norms(String field, byte[] result, int offset)
     throws IOException {
-    ensureOpen();
-    byte[] bytes = normsCache.get(field);
-    if (bytes==null && !hasNorms(field)) {
-      Arrays.fill(result, offset, result.length, Similarity.getDefault().encodeNormValue(1.0f));
-    } else if (bytes != null) {                           // cache hit
-      System.arraycopy(bytes, 0, result, offset, maxDoc());
-    } else {
-      for (int i = 0; i < subReaders.length; i++) {      // read from segments
-        subReaders[i].norms(field, result, offset + starts[i]);
-      }
-    }
+    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
   }
 
   @Override
   protected void doSetNorm(int n, String field, byte value)
     throws CorruptIndexException, IOException {
-    synchronized (normsCache) {
-      normsCache.remove(field);                         // clear cache      
-    }
     int i = readerIndex(n);                           // find segment num
     subReaders[i].setNorm(n-starts[i], field, value); // dispatch
   }
@@ -864,7 +806,6 @@ class DirectoryReader extends IndexReade
   @Override
   protected synchronized void doClose() throws IOException {
     IOException ioe = null;
-    normsCache = null;
     for (int i = 0; i < subReaders.length; i++) {
       // try to close each reader, even if an exception is thrown
       try {
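
For callers of the removed top-level norms: with the per-reader normsCache gone, DirectoryReader.norms now throws, and the intended replacements are MultiNorms or SlowMultiReaderWrapper. A sketch, assuming an open Directory dir and an indexed field "body":

    IndexReader r = IndexReader.open(dir);
    byte[] norms = MultiNorms.norms(r, "body");      // flattens per-segment norms on demand
    // or, when a top-level IndexReader view is really required:
    IndexReader slow = new SlowMultiReaderWrapper(r);
    byte[] cached = slow.norms("body");              // computed once, then cached per field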

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java Wed Jan  5 20:25:17 2011
@@ -111,7 +111,6 @@ final class DocFieldProcessor extends Do
     // FieldInfo.storePayload.
     final String fileName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.FIELD_INFOS_EXTENSION);
     fieldInfos.write(state.directory, fileName);
-    state.flushedFiles.add(fileName);
   }
 
   @Override

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocInverterPerField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocInverterPerField.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocInverterPerField.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocInverterPerField.java Wed Jan  5 20:25:17 2011
@@ -126,8 +126,6 @@ final class DocInverterPerField extends 
 
           // reset the TokenStream to the first token
           stream.reset();
-
-          final int startLength = fieldState.length;
           
           try {
             boolean hasMoreTokens = stream.incrementToken();

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Wed Jan  5 20:25:17 2011
@@ -590,14 +590,14 @@ final class DocumentsWriter {
         threads.add(threadState.consumer);
       }
 
-      long startNumBytesUsed = bytesUsed();
+      double startMBUsed = bytesUsed()/1024./1024.;
 
       consumer.flush(threads, flushState);
       newSegment.setHasVectors(flushState.hasVectors);
 
       if (infoStream != null) {
         message("new segment has " + (flushState.hasVectors ? "vectors" : "no vectors"));
-        message("flushedFiles=" + flushState.flushedFiles);
+        message("flushedFiles=" + newSegment.files());
         message("flushed codecs=" + newSegment.getSegmentCodecs());
       }
 
@@ -609,22 +609,23 @@ final class DocumentsWriter {
         }
 
         CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
-        for(String fileName : flushState.flushedFiles) {
+        for(String fileName : newSegment.files()) {
           cfsWriter.addFile(fileName);
         }
         cfsWriter.close();
-        deleter.deleteNewFiles(flushState.flushedFiles);
-
+        deleter.deleteNewFiles(newSegment.files());
         newSegment.setUseCompoundFile(true);
       }
 
       if (infoStream != null) {
         message("flush: segment=" + newSegment);
-        final long newSegmentSize = newSegment.sizeInBytes();
-        message("  ramUsed=" + nf.format(startNumBytesUsed / 1024. / 1024.) + " MB" +
-            " newFlushedSize=" + nf.format(newSegmentSize / 1024 / 1024) + " MB" +
-            " docs/MB=" + nf.format(numDocs / (newSegmentSize / 1024. / 1024.)) +
-            " new/old=" + nf.format(100.0 * newSegmentSize / startNumBytesUsed) + "%");
+        final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
+        final double newSegmentSize = newSegment.sizeInBytes(true)/1024./1024.;
+        message("  ramUsed=" + nf.format(startMBUsed) + " MB" +
+                " newFlushedSize=" + nf.format(newSegmentSize) + " MB" +
+                " (" + nf.format(newSegmentSizeNoStore) + " MB w/o doc stores)" +
+                " docs/MB=" + nf.format(numDocs / newSegmentSize) +
+                " new/old=" + nf.format(100.0 * newSegmentSizeNoStore / startMBUsed) + "%");
       }
 
       success = true;
@@ -908,7 +909,8 @@ final class DocumentsWriter {
   final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
 
   /* if you increase this, you must fix field cache impl for
-   * getTerms/getTermsIndex requires <= 32768 */
+   * getTerms/getTermsIndex requires <= 32768.  Also fix
+   * DeltaBytesWriter's TERM_EOF if necessary. */
   final static int MAX_TERM_LENGTH_UTF8 = BYTE_BLOCK_SIZE-2;
 
   /* Initial chunks size of the shared int[] blocks used to
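
The reworked flush logging above reports sizes in MB and distinguishes the flushed size with and without doc stores. A sketch of the arithmetic with illustrative numbers (not from this commit):

    double startMBUsed = 41943040L / 1024. / 1024.;  // RAM at flush start: 40.0 MB
    double newSegmentSize = 8.0;                     // newSegment.sizeInBytes(true), in MB
    double newSegmentSizeNoStore = 6.0;              // newSegment.sizeInBytes(false), in MB
    int numDocs = 100000;
    // prints docs/MB=12500.0 new/old=15.0%:
    System.out.println("docs/MB=" + (numDocs / newSegmentSize)
        + " new/old=" + (100.0 * newSegmentSizeNoStore / startMBUsed) + "%");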

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/IndexWriter.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/IndexWriter.java Wed Jan  5 20:25:17 2011
@@ -2256,11 +2256,11 @@ public class IndexWriter implements Clos
       // Now create the compound file if needed
       if (useCompoundFile) {
         merger.createCompoundFile(mergedName + ".cfs", info);
-        info.setUseCompoundFile(true);
         
         // delete new non cfs files directly: they were never
         // registered with IFD
-        deleter.deleteNewFiles(merger.getMergedFiles(info));
+        deleter.deleteNewFiles(info.files());
+        info.setUseCompoundFile(true);
       }
 
       // Register the new segment
@@ -3159,7 +3159,7 @@ public class IndexWriter implements Clos
 
             synchronized(this) {
               deleter.deleteFile(compoundFileName);
-              deleter.deleteNewFiles(merger.getMergedFiles(merge.info));
+              deleter.deleteNewFiles(merge.info.files());
             }
           }
         }
@@ -3170,7 +3170,7 @@ public class IndexWriter implements Clos
 
           // delete new non cfs files directly: they were never
           // registered with IFD
-          deleter.deleteNewFiles(merger.getMergedFiles(merge.info));
+          deleter.deleteNewFiles(merge.info.files());
 
           if (merge.isAborted()) {
             if (infoStream != null) {
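
Why the reorder in the first IndexWriter hunk matters, as a sketch: SegmentInfo.files() keys off the compound-file flag, so the raw files must be deleted before the flag flips:

    // files() still lists the raw per-extension files that were just folded
    // into the new .cfs, which is exactly the set to delete:
    deleter.deleteNewFiles(info.files());
    info.setUseCompoundFile(true); // from here on, files() reports the .cfs instead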

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java Wed Jan  5 20:25:17 2011
@@ -55,7 +55,7 @@ public final class IndexWriterConfig imp
   public static enum OpenMode { CREATE, APPEND, CREATE_OR_APPEND }
   
   /** Default value is 32. Change using {@link #setTermIndexInterval(int)}. */
-  public static final int DEFAULT_TERM_INDEX_INTERVAL = 32;
+  public static final int DEFAULT_TERM_INDEX_INTERVAL = 32;                   // TODO: this should be private to the codec, not settable here
 
   /** Denotes a flush trigger is disabled. */
   public final static int DISABLE_AUTO_FLUSH = -1;
@@ -115,7 +115,7 @@ public final class IndexWriterConfig imp
   private OpenMode openMode;
   private int maxFieldLength;
   private Similarity similarity;
-  private int termIndexInterval;
+  private int termIndexInterval; // TODO: this should be private to the codec, not settable here
   private MergeScheduler mergeScheduler;
   private long writeLockTimeout;
   private int maxBufferedDeleteTerms;
@@ -147,7 +147,7 @@ public final class IndexWriterConfig imp
     openMode = OpenMode.CREATE_OR_APPEND;
     maxFieldLength = UNLIMITED_FIELD_LENGTH;
     similarity = Similarity.getDefault();
-    termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL;
+    termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL; // TODO: this should be private to the codec, not settable here
     mergeScheduler = new ConcurrentMergeScheduler();
     writeLockTimeout = WRITE_LOCK_TIMEOUT;
     maxBufferedDeleteTerms = DEFAULT_MAX_BUFFERED_DELETE_TERMS;
@@ -312,7 +312,7 @@ public final class IndexWriterConfig imp
    * 
    * @see #DEFAULT_TERM_INDEX_INTERVAL
    */
-  public IndexWriterConfig setTermIndexInterval(int interval) {
+  public IndexWriterConfig setTermIndexInterval(int interval) { // TODO: this should be private to the codec, not settable here
     this.termIndexInterval = interval;
     return this;
   }
@@ -322,7 +322,7 @@ public final class IndexWriterConfig imp
    * 
    * @see #setTermIndexInterval(int)
    */
-  public int getTermIndexInterval() {
+  public int getTermIndexInterval() { // TODO: this should be private to the codec, not settable here
     return termIndexInterval;
   }
 
@@ -613,7 +613,7 @@ public final class IndexWriterConfig imp
     sb.append("openMode=").append(openMode).append("\n");
     sb.append("maxFieldLength=").append(maxFieldLength).append("\n");
     sb.append("similarity=").append(similarity.getClass().getName()).append("\n");
-    sb.append("termIndexInterval=").append(termIndexInterval).append("\n");
+    sb.append("termIndexInterval=").append(termIndexInterval).append("\n"); // TODO: this should be private to the codec, not settable here
     sb.append("mergeScheduler=").append(mergeScheduler.getClass().getName()).append("\n");
     sb.append("default WRITE_LOCK_TIMEOUT=").append(WRITE_LOCK_TIMEOUT).append("\n");
     sb.append("writeLockTimeout=").append(writeLockTimeout).append("\n");
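
Until that TODO lands, the interval remains settable on the config. A usage sketch, assuming an Analyzer and a trunk Version constant:

    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    conf.setTermIndexInterval(64); // default is 32; a larger interval shrinks the
                                   // in-RAM terms index at the cost of slower seeks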

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java Wed Jan  5 20:25:17 2011
@@ -184,7 +184,7 @@ public abstract class LogMergePolicy ext
   }
   
   protected long sizeBytes(SegmentInfo info) throws IOException {
-    long byteSize = info.sizeInBytes();
+    long byteSize = info.sizeInBytes(true);
     if (calibrateSizeByDeletes) {
       int delCount = writer.get().numDeletedDocs(info);
       double delRatio = (info.docCount <= 0 ? 0.0f : ((float)delCount / (float)info.docCount));
@@ -241,6 +241,9 @@ public abstract class LogMergePolicy ext
     while (start >= 0) {
       SegmentInfo info = infos.info(start);
       if (size(info) > maxMergeSize || sizeDocs(info) > maxMergeDocs) {
+        if (verbose()) {
+          message("optimize: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSize + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")");
+        }
         // need to skip that segment + add a merge for the 'right' segments,
         // unless there is only 1 which is optimized.
         if (last - start - 1 > 1 || (start != last - 1 && !isOptimized(infos.info(start + 1)))) {
@@ -335,10 +338,18 @@ public abstract class LogMergePolicy ext
       int maxNumSegments, Set<SegmentInfo> segmentsToOptimize) throws IOException {
 
     assert maxNumSegments > 0;
+    if (verbose()) {
+      message("findMergesForOptimize: maxNumSegs=" + maxNumSegments + " segsToOptimize= "+ segmentsToOptimize);
+    }
 
     // If the segments are already optimized (e.g. there's only 1 segment), or
     // there are <maxNumSegements, all optimized, nothing to do.
-    if (isOptimized(infos, maxNumSegments, segmentsToOptimize)) return null;
+    if (isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
+      if (verbose()) {
+        message("already optimized; skip");
+      }
+      return null;
+    }
     
     // Find the newest (rightmost) segment that needs to
     // be optimized (other segments may have been flushed
@@ -352,10 +363,20 @@ public abstract class LogMergePolicy ext
       }
     }
 
-    if (last == 0) return null;
+    if (last == 0) {
+      if (verbose()) {
+        message("last == 0; skip");
+      }
+      return null;
+    }
     
     // There is only one segment already, and it is optimized
-    if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) return null;
+    if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) {
+      if (verbose()) {
+        message("already 1 seg; skip");
+      }
+      return null;
+    }
 
     // Check if there are any segments above the threshold
     boolean anyTooLarge = false;
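
The new messages only appear when verbose() returns true, i.e. when the writer has an infoStream. A sketch, assuming an open IndexWriter:

    writer.setInfoStream(System.out); // makes the merge policy verbose
    writer.optimize(1);               // now logs "findMergesForOptimize: maxNumSegs=1 ..."
                                      // plus per-segment skip reasons during the cascade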

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/MergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/MergePolicy.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/MergePolicy.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/MergePolicy.java Wed Jan  5 20:25:17 2011
@@ -165,7 +165,7 @@ public abstract class MergePolicy implem
     public long totalBytesSize() throws IOException {
       long total = 0;
       for (SegmentInfo info : segments) {
-        total += info.sizeInBytes();
+        total += info.sizeInBytes(true);
       }
       return total;
     }

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/MultiReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/MultiReader.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/MultiReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/MultiReader.java Wed Jan  5 20:25:17 2011
@@ -18,14 +18,12 @@ package org.apache.lucene.index;
  */
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -38,7 +36,6 @@ public class MultiReader extends IndexRe
   private int[] starts;                           // 1st docno for each segment
   private final Map<IndexReader,ReaderUtil.Slice> subReaderToSlice = new HashMap<IndexReader,ReaderUtil.Slice>();
   private boolean[] decrefOnClose;                // remember which subreaders to decRef on close
-  private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
   private int maxDoc = 0;
   private int numDocs = -1;
   private boolean hasDeletions = false;
@@ -316,45 +313,18 @@ public class MultiReader extends IndexRe
   
   @Override
   public synchronized byte[] norms(String field) throws IOException {
-    ensureOpen();
-    byte[] bytes = normsCache.get(field);
-    if (bytes != null)
-      return bytes;          // cache hit
-    if (!hasNorms(field))
-      return null;
-
-    bytes = new byte[maxDoc()];
-    for (int i = 0; i < subReaders.length; i++)
-      subReaders[i].norms(field, bytes, starts[i]);
-    normsCache.put(field, bytes);      // update cache
-    return bytes;
+    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
   }
 
   @Override
   public synchronized void norms(String field, byte[] result, int offset)
     throws IOException {
-    ensureOpen();
-    byte[] bytes = normsCache.get(field);
-    for (int i = 0; i < subReaders.length; i++)      // read from segments
-      subReaders[i].norms(field, result, offset + starts[i]);
-
-    if (bytes==null && !hasNorms(field)) {
-      Arrays.fill(result, offset, result.length, Similarity.getDefault().encodeNormValue(1.0f));
-    } else if (bytes != null) {                         // cache hit
-      System.arraycopy(bytes, 0, result, offset, maxDoc());
-    } else {
-      for (int i = 0; i < subReaders.length; i++) {     // read from segments
-        subReaders[i].norms(field, result, offset + starts[i]);
-      }
-    }
+    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
   }
 
   @Override
   protected void doSetNorm(int n, String field, byte value)
     throws CorruptIndexException, IOException {
-    synchronized (normsCache) {
-      normsCache.remove(field);                         // clear cache
-    }
     int i = readerIndex(n);                           // find segment num
     subReaders[i].setNorm(n-starts[i], field, value); // dispatch
   }

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/NormsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/NormsWriter.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/NormsWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/NormsWriter.java Wed Jan  5 20:25:17 2011
@@ -89,7 +89,6 @@ final class NormsWriter extends Inverted
     }
 
     final String normsFileName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.NORMS_EXTENSION);
-    state.flushedFiles.add(normsFileName);
     IndexOutput normsOut = state.directory.createOutput(normsFileName);
 
     try {

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/ParallelReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/ParallelReader.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/ParallelReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/ParallelReader.java Wed Jan  5 20:25:17 2011
@@ -25,6 +25,7 @@ import org.apache.lucene.index.values.Do
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.Pair;
 import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
+import org.apache.lucene.search.Similarity;
 import org.apache.lucene.util.BytesRef;
 
 import java.io.IOException;
@@ -55,7 +56,8 @@ public class ParallelReader extends Inde
   private SortedMap<String,IndexReader> fieldToReader = new TreeMap<String,IndexReader>();
   private Map<IndexReader,Collection<String>> readerToFields = new HashMap<IndexReader,Collection<String>>();
   private List<IndexReader> storedFieldReaders = new ArrayList<IndexReader>();
-
+  private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
+  
   private int maxDoc;
   private int numDocs;
   private boolean hasDeletions;
@@ -143,6 +145,9 @@ public class ParallelReader extends Inde
       reader.incRef();
     }
     decrefOnClose.add(Boolean.valueOf(incRefReaders));
+    synchronized(normsCache) {
+      normsCache.clear(); // TODO: don't need to clear this for all fields really?
+    }
   }
 
   private class ParallelFieldsEnum extends FieldsEnum {
@@ -293,6 +298,7 @@ public class ParallelReader extends Inde
 
     if (reopened) {
       List<Boolean> newDecrefOnClose = new ArrayList<Boolean>();
+      // TODO: maybe add a special reopen-ctor for norm-copying?
       ParallelReader pr = new ParallelReader();
       for (int i = 0; i < readers.size(); i++) {
         IndexReader oldReader = readers.get(i);
@@ -434,27 +440,51 @@ public class ParallelReader extends Inde
   }
 
   @Override
-  public byte[] norms(String field) throws IOException {
+  public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
     IndexReader reader = fieldToReader.get(field);
-    return reader==null ? null : reader.norms(field);
+
+    if (reader==null)
+      return null;
+    
+    byte[] bytes = normsCache.get(field);
+    if (bytes != null)
+      return bytes;
+    if (!hasNorms(field))
+      return null;
+
+    bytes = MultiNorms.norms(reader, field);
+    normsCache.put(field, bytes);
+    return bytes;
   }
 
   @Override
-  public void norms(String field, byte[] result, int offset)
+  public synchronized void norms(String field, byte[] result, int offset)
     throws IOException {
+    // TODO: maybe optimize
     ensureOpen();
     IndexReader reader = fieldToReader.get(field);
-    if (reader!=null)
-      reader.norms(field, result, offset);
+    if (reader==null)
+      return;
+    
+    byte[] norms = norms(field);
+    if (norms == null) {
+      Arrays.fill(result, offset, result.length, Similarity.getDefault().encodeNormValue(1.0f));
+    } else {
+      System.arraycopy(norms, 0, result, offset, maxDoc());
+    }
   }
 
   @Override
   protected void doSetNorm(int n, String field, byte value)
     throws CorruptIndexException, IOException {
     IndexReader reader = fieldToReader.get(field);
-    if (reader!=null)
+    if (reader!=null) {
+      synchronized(normsCache) {
+        normsCache.remove(field);
+      }
       reader.doSetNorm(n, field, value);
+    }
   }
 
   @Override
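
Net effect of the ParallelReader changes, sketched: each field maps to exactly one sub-reader, and the whole-index norms for a field are now built through MultiNorms and memoized, with setNorm evicting the entry. Assuming two single-field readers:

    ParallelReader pr = new ParallelReader();
    pr.add(titleReader);                // contributes field "title"
    pr.add(bodyReader);                 // contributes field "body"
    byte[] n1 = pr.norms("title");      // built via MultiNorms.norms, stored in normsCache
    byte[] n2 = pr.norms("title");      // cache hit: the same byte[] is returned
    pr.setNorm(0, "title", (byte) 115); // removes the cached entry for "title"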

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentInfo.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentInfo.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentInfo.java Wed Jan  5 20:25:17 2011
@@ -222,13 +222,16 @@ public final class SegmentInfo {
   
   /** Returns total size in bytes of all of files used by
    *  this segment. */
-  public long sizeInBytes() throws IOException {
+  public long sizeInBytes(boolean includeDocStores) throws IOException {
     if (sizeInBytes == -1) {
       List<String> files = files();
       final int size = files.size();
       sizeInBytes = 0;
       for(int i=0;i<size;i++) {
         final String fileName = files.get(i);
+        if (!includeDocStores && IndexFileNames.isDocStoreFile(fileName)) {
+          continue;
+        }
         // We don't count bytes used by a shared doc store
         // against this segment:
         if (docStoreOffset == -1 || !IndexFileNames.isDocStoreFile(fileName))
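
Usage sketch for the new includeDocStores flag. Note, visible in the code above, that the result is memoized in the single sizeInBytes field, so in this revision whichever flag is passed first determines the cached value:

    long withStores    = info.sizeInBytes(true);  // counts the segment's own doc-store
                                                  // files (shared stores stay excluded
                                                  // via the docStoreOffset check)
    long withoutStores = info.sizeInBytes(false); // skips doc-store files entirely;
                                                  // LogMergePolicy sizes segments with true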

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentMerger.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentMerger.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentMerger.java Wed Jan  5 20:25:17 2011
@@ -20,8 +20,6 @@ package org.apache.lucene.index;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Set;
-import java.util.HashSet;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -127,40 +125,11 @@ final class SegmentMerger {
     return mergedDocs;
   }
 
-  final Collection<String> getMergedFiles(final SegmentInfo info) throws IOException {
-    Set<String> fileSet = new HashSet<String>();
-
-    // Basic files
-    for (String ext : IndexFileNames.COMPOUND_EXTENSIONS_NOT_CODEC) {
-      fileSet.add(IndexFileNames.segmentFileName(segment, "", ext));
-    }
-    segmentWriteState.segmentCodecs.files(directory, info, fileSet);
-    
-    // Fieldable norm files
-    final int numFIs = fieldInfos.size();
-    for (int i = 0; i < numFIs; i++) {
-      final FieldInfo fi = fieldInfos.fieldInfo(i);
-      if (fi.isIndexed && !fi.omitNorms) {
-        fileSet.add(IndexFileNames.segmentFileName(segment, "", IndexFileNames.NORMS_EXTENSION));
-        break;
-      }
-    }
-
-    // Vector files
-    if (fieldInfos.hasVectors()) {
-      for (String ext : IndexFileNames.VECTOR_EXTENSIONS) {
-        fileSet.add(IndexFileNames.segmentFileName(segment, "", ext));
-      }
-    }
-
-    return fileSet;
-  }
-
   final Collection<String> createCompoundFile(String fileName, final SegmentInfo info)
           throws IOException {
 
     // Now merge all added files
-    Collection<String> files = getMergedFiles(info);
+    Collection<String> files = info.files();
     CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, fileName, checkAbort);
     for (String file : files) {
       cfsWriter.addFile(file);

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentReadState.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentReadState.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentReadState.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentReadState.java Wed Jan  5 20:25:17 2011
@@ -33,7 +33,7 @@ public class SegmentReadState {
   // terms index on init (preflex is the only once currently
   // that must do so), then it should negate this value to
   // get the app's terms divisor:
-  public final int termsIndexDivisor;
+  public int termsIndexDivisor;
   public final String codecId;
 
   public SegmentReadState(Directory dir, SegmentInfo info,

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java Wed Jan  5 20:25:17 2011
@@ -45,7 +45,7 @@ public class SegmentWriteState {
    * faster, while larger values use less memory and make searching slightly
    * slower.  Searching is typically not dominated by dictionary lookup, so
    * tweaking this is rarely useful.*/
-  public final int termIndexInterval;
+  public int termIndexInterval;                   // TODO: this should be private to the codec, not settable here or in IWC
 
   /** Expert: The fraction of TermDocs entries stored in skip tables,
    * used to accelerate {@link DocsEnum#advance(int)}.  Larger values result in

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java Wed Jan  5 20:25:17 2011
@@ -18,6 +18,13 @@ package org.apache.lucene.index;
  */
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+
+import org.apache.lucene.search.Similarity;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ReaderUtil; // javadoc
 
@@ -48,6 +55,8 @@ import org.apache.lucene.index.MultiRead
 
 public final class SlowMultiReaderWrapper extends FilterIndexReader {
 
+  private final Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
+  
   public SlowMultiReaderWrapper(IndexReader other) {
     super(other);
   }
@@ -62,9 +71,44 @@ public final class SlowMultiReaderWrappe
     return MultiFields.getDeletedDocs(in);
   }
 
+  
   @Override
   public IndexReader[] getSequentialSubReaders() {
     return null;
   }
+
+  @Override
+  public synchronized byte[] norms(String field) throws IOException {
+    ensureOpen();
+    byte[] bytes = normsCache.get(field);
+    if (bytes != null)
+      return bytes;
+    if (!hasNorms(field))
+      return null;
+
+    bytes = MultiNorms.norms(in, field);
+    normsCache.put(field, bytes);
+    return bytes;
+  }
+
+  @Override
+  public synchronized void norms(String field, byte[] bytes, int offset) throws IOException {
+    // TODO: maybe optimize
+    ensureOpen();
+    byte[] norms = norms(field);
+    if (norms == null) {
+      Arrays.fill(bytes, offset, bytes.length, Similarity.getDefault().encodeNormValue(1.0f));
+    } else {
+      System.arraycopy(norms, 0, bytes, offset, maxDoc());
+    }
+  }
   
+  @Override
+  protected void doSetNorm(int n, String field, byte value)
+      throws CorruptIndexException, IOException {
+    synchronized(normsCache) {
+      normsCache.remove(field);
+    }
+    in.doSetNorm(n, field, value);
+  }
 }

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java Wed Jan  5 20:25:17 2011
@@ -53,11 +53,7 @@ final class StoredFieldsWriter {
       fieldsWriter = null;
       lastDocID = 0;
 
-      String fieldsName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.FIELDS_EXTENSION);
       String fieldsIdxName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.FIELDS_INDEX_EXTENSION);
-      state.flushedFiles.add(fieldsName);
-      state.flushedFiles.add(fieldsIdxName);
-
       if (4 + ((long) state.numDocs) * 8 != state.directory.fileLength(fieldsIdxName)) {
         throw new RuntimeException("after flush: fdx size mismatch: " + state.numDocs + " docs vs " + state.directory.fileLength(fieldsIdxName) + " length in bytes of " + fieldsIdxName + " file exists?=" + state.directory.fileExists(fieldsIdxName));
       }

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java Wed Jan  5 20:25:17 2011
@@ -59,17 +59,10 @@ final class TermVectorsTermsWriter exten
       tvx = tvd = tvf = null;
       assert state.segmentName != null;
       String idxName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_INDEX_EXTENSION);
-      String fldName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_FIELDS_EXTENSION);
-      String docName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
-
       if (4 + ((long) state.numDocs) * 16 != state.directory.fileLength(idxName)) {
         throw new RuntimeException("after flush: tvx size mismatch: " + state.numDocs + " docs vs " + state.directory.fileLength(idxName) + " length in bytes of " + idxName + " file exists?=" + state.directory.fileExists(idxName));
       }
 
-      state.flushedFiles.add(idxName);
-      state.flushedFiles.add(fldName);
-      state.flushedFiles.add(docName);
-
       lastDocID = 0;
       state.hasVectors = hasVectors;
       hasVectors = false;

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/DeltaBytesReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/DeltaBytesReader.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/DeltaBytesReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/DeltaBytesReader.java Wed Jan  5 20:25:17 2011
@@ -36,13 +36,17 @@ final class DeltaBytesReader {
     term.copy(text);
   }
 
-  void read() throws IOException {
+  boolean read() throws IOException {
     final int start = in.readVInt();
+    if (start == DeltaBytesWriter.TERM_EOF) {
+      return false;
+    }
     final int suffix = in.readVInt();
     assert start <= term.length: "start=" + start + " length=" + term.length;
     final int newLength = start+suffix;
     term.grow(newLength);
     in.readBytes(term.bytes, start, suffix);
     term.length = newLength;
+    return true;
   }
 }

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/DeltaBytesWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/DeltaBytesWriter.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/DeltaBytesWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/DeltaBytesWriter.java Wed Jan  5 20:25:17 2011
@@ -20,11 +20,18 @@ package org.apache.lucene.index.codecs;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.BytesRef;
+import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
 
 import java.io.IOException;
 
 final class DeltaBytesWriter {
 
+  // Must be bigger than
+  // DocumentsWriter.MAX_TERM_LENGTH_UTF8.  If you change
+  // this it's an index format change, so that change must be
+  // versioned:
+  final static int TERM_EOF = BYTE_BLOCK_SIZE;
+
   private byte[] lastBytes = new byte[10];
   private int lastLength;
   final IndexOutput out;
@@ -45,8 +52,9 @@ final class DeltaBytesWriter {
 
     final int limit = length < lastLength ? length : lastLength;
     while(start < limit) {
-      if (bytes[upto] != lastBytes[start])
+      if (bytes[upto] != lastBytes[start]) {
         break;
+      }
       start++;
       upto++;
     }
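
How the writer and reader pair up on the new sentinel, sketched (processTerm is a hypothetical consumer; these classes are package-private, so this is illustrative only):

    // Writer side: after the last real term, emit the sentinel as the prefix
    // length. TERM_EOF == BYTE_BLOCK_SIZE can never be a legal shared-prefix
    // length, since terms are capped at MAX_TERM_LENGTH_UTF8 = BYTE_BLOCK_SIZE - 2:
    out.writeVInt(DeltaBytesWriter.TERM_EOF);

    // Reader side: read() now returns false at the sentinel instead of the
    // caller tracking an external term count:
    while (reader.read()) {
      processTerm(reader.term); // current term as a BytesRef
    }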

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java Wed Jan  5 20:25:17 2011
@@ -33,29 +33,6 @@ import java.util.Collection;
 import java.util.Comparator;
 import java.io.IOException;
 
-/**
- * Uses a simplistic format to record terms dict index
- * information.  Limititations:
- *
- *   - Index for all fields is loaded entirely into RAM up
- *     front 
- *   - Index is stored in RAM using shared byte[] that
- *     wastefully expand every term.  Using FST to share
- *     common prefix & suffix would save RAM.
- *   - Index is taken at regular numTerms (every 128 by
- *     default); might be better to do it by "net docFreqs"
- *     encountered, so that for spans of low-freq terms we
- *     take index less often.
- *
- * A better approach might be something similar to how
- * postings are encoded, w/ multi-level skips.  Ie, load all
- * terms index data into memory, as a single large compactly
- * encoded stream (eg delta bytes + delta offset).  Index
- * that w/ multi-level skipper.  Then to look up a term is
- * the equivalent binary search, using the skipper instead,
- * while data remains compressed in memory.
- */
-
 import org.apache.lucene.index.IndexFileNames;
 
 /** @lucene.experimental */
@@ -74,7 +51,7 @@ public class FixedGapTermsIndexReader ex
   final private int indexInterval;
 
   // Closed if indexLoaded is true:
-  final private IndexInput in;
+  private IndexInput in;
   private volatile boolean indexLoaded;
 
   private final Comparator<BytesRef> termComp;
@@ -85,7 +62,7 @@ public class FixedGapTermsIndexReader ex
   private final PagedBytes termBytes = new PagedBytes(PAGED_BYTES_BITS);
   private PagedBytes.Reader termBytesReader;
 
-  final HashMap<FieldInfo,FieldIndexReader> fields = new HashMap<FieldInfo,FieldIndexReader>();
+  final HashMap<FieldInfo,FieldIndexData> fields = new HashMap<FieldInfo,FieldIndexData>();
   
   // start of the field info data
   protected long dirOffset;
@@ -95,7 +72,7 @@ public class FixedGapTermsIndexReader ex
 
     this.termComp = termComp;
 
-    IndexInput in = dir.openInput(IndexFileNames.segmentFileName(segment, codecId, FixedGapTermsIndexWriter.TERMS_INDEX_EXTENSION));
+    in = dir.openInput(IndexFileNames.segmentFileName(segment, codecId, FixedGapTermsIndexWriter.TERMS_INDEX_EXTENSION));
     
     boolean success = false;
 
@@ -116,49 +93,137 @@ public class FixedGapTermsIndexReader ex
       seekDir(in, dirOffset);
 
       // Read directory
-      final int numFields = in.readInt();
-
+      final int numFields = in.readVInt();      
       for(int i=0;i<numFields;i++) {
-        final int field = in.readInt();
-        final int numIndexTerms = in.readInt();
-        final long termsStart = in.readLong();
-        final long indexStart = in.readLong();
-        final long packedIndexStart = in.readLong();
-        final long packedOffsetsStart = in.readLong();
+        final int field = in.readVInt();
+        final int numIndexTerms = in.readVInt();
+        final long termsStart = in.readVLong();
+        final long indexStart = in.readVLong();
+        final long packedIndexStart = in.readVLong();
+        final long packedOffsetsStart = in.readVLong();
         assert packedIndexStart >= indexStart: "packedStart=" + packedIndexStart + " indexStart=" + indexStart + " numIndexTerms=" + numIndexTerms + " seg=" + segment;
-        if (numIndexTerms > 0) {
-          final FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
-          fields.put(fieldInfo, new FieldIndexReader(in, fieldInfo, numIndexTerms, indexStart, termsStart, packedIndexStart, packedOffsetsStart));
-        }
+        final FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
+        fields.put(fieldInfo, new FieldIndexData(fieldInfo, numIndexTerms, indexStart, termsStart, packedIndexStart, packedOffsetsStart));
       }
       success = true;
     } finally {
       if (indexDivisor > 0) {
         in.close();
-        this.in = null;
+        in = null;
         if (success) {
           indexLoaded = true;
         }
         termBytesReader = termBytes.freeze(true);
-      } else {
-        this.in = in;
       }
     }
   }
   
+  @Override
+  public int getDivisor() {
+    return indexDivisor;
+  }
+
   protected void readHeader(IndexInput input) throws IOException {
     CodecUtil.checkHeader(input, FixedGapTermsIndexWriter.CODEC_NAME,
       FixedGapTermsIndexWriter.VERSION_START, FixedGapTermsIndexWriter.VERSION_START);
     dirOffset = input.readLong();
   }
 
-  private final class FieldIndexReader extends FieldReader {
+  private class IndexEnum extends FieldIndexEnum {
+    private final FieldIndexData.CoreFieldIndex fieldIndex;
+    private final BytesRef term = new BytesRef();
+    private final BytesRef nextTerm = new BytesRef();
+    private long ord;
 
-    final private FieldInfo fieldInfo;
+    public IndexEnum(FieldIndexData.CoreFieldIndex fieldIndex) {
+      this.fieldIndex = fieldIndex;
+    }
+
+    @Override
+    public BytesRef term() {
+      return term;
+    }
+
+    @Override
+    public long seek(BytesRef target) {
+      int lo = 0;				  // binary search
+      int hi = fieldIndex.numIndexTerms - 1;
+      assert totalIndexInterval > 0 : "totalIndexInterval=" + totalIndexInterval;
+
+      while (hi >= lo) {
+        int mid = (lo + hi) >>> 1;
+
+        final long offset = fieldIndex.termOffsets.get(mid);
+        final int length = (int) (fieldIndex.termOffsets.get(1+mid) - offset);
+        termBytesReader.fillSlice(term, fieldIndex.termBytesStart + offset, length);
+
+        int delta = termComp.compare(target, term);
+        if (delta < 0) {
+          hi = mid - 1;
+        } else if (delta > 0) {
+          lo = mid + 1;
+        } else {
+          assert mid >= 0;
+          ord = mid*totalIndexInterval;
+          return fieldIndex.termsStart + fieldIndex.termsDictOffsets.get(mid);
+        }
+      }
 
-    private volatile CoreFieldIndex coreIndex;
+      if (hi < 0) {
+        assert hi == -1;
+        hi = 0;
+      }
 
-    private final IndexInput in;
+      final long offset = fieldIndex.termOffsets.get(hi);
+      final int length = (int) (fieldIndex.termOffsets.get(1+hi) - offset);
+      termBytesReader.fillSlice(term, fieldIndex.termBytesStart + offset, length);
+
+      ord = hi*totalIndexInterval;
+      return fieldIndex.termsStart + fieldIndex.termsDictOffsets.get(hi);
+    }
+
+    @Override
+    public long next() {
+      final int idx = 1 + (int) (ord / totalIndexInterval);
+      if (idx >= fieldIndex.numIndexTerms) {
+        return -1;
+      }
+      ord += totalIndexInterval;
+
+      final long offset = fieldIndex.termOffsets.get(idx);
+      final int length = (int) (fieldIndex.termOffsets.get(1+idx) - offset);
+      termBytesReader.fillSlice(nextTerm, fieldIndex.termBytesStart + offset, length);
+      return fieldIndex.termsStart + fieldIndex.termsDictOffsets.get(idx);
+    }
+
+    @Override
+    public long ord() {
+      return ord;
+    }
+
+    @Override
+    public long seek(long ord) {
+      int idx = (int) (ord / totalIndexInterval);
+      // caller must ensure ord is in bounds
+      assert idx < fieldIndex.numIndexTerms;
+      final long offset = fieldIndex.termOffsets.get(idx);
+      final int length = (int) (fieldIndex.termOffsets.get(1+idx) - offset);
+      termBytesReader.fillSlice(term, fieldIndex.termBytesStart + offset, length);
+      this.ord = idx * totalIndexInterval;
+      return fieldIndex.termsStart + fieldIndex.termsDictOffsets.get(idx);
+    }
+  }
+
+  @Override
+  public boolean supportsOrd() {
+    return true;
+  }
+
+  private final class FieldIndexData {
+
+    final private FieldInfo fieldInfo;
+
+    volatile CoreFieldIndex coreIndex;
 
     private final long indexStart;
     private final long termsStart;
@@ -167,11 +232,10 @@ public class FixedGapTermsIndexReader ex
 
     private final int numIndexTerms;
 
-    public FieldIndexReader(IndexInput in, FieldInfo fieldInfo, int numIndexTerms, long indexStart, long termsStart, long packedIndexStart,
-                            long packedOffsetsStart) throws IOException {
+    public FieldIndexData(FieldInfo fieldInfo, int numIndexTerms, long indexStart, long termsStart, long packedIndexStart,
+                          long packedOffsetsStart) throws IOException {
 
       this.fieldInfo = fieldInfo;
-      this.in = in;
       this.termsStart = termsStart;
       this.indexStart = indexStart;
       this.packedIndexStart = packedIndexStart;
@@ -182,12 +246,7 @@ public class FixedGapTermsIndexReader ex
       // is -1, so that PrefixCodedTermsReader can call
       // isIndexTerm for each field:
       if (indexDivisor > 0) {
-        coreIndex = new CoreFieldIndex(indexStart,
-                                       termsStart,
-                                       packedIndexStart,
-                                       packedOffsetsStart,
-                                       numIndexTerms);
-      
+        loadTermsIndex();
       }
     }
 
@@ -197,46 +256,11 @@ public class FixedGapTermsIndexReader ex
       }
     }
 
-    @Override
-    public boolean isIndexTerm(long ord, int docFreq, boolean onlyLoaded) {
-      if (onlyLoaded) {
-        return ord % totalIndexInterval == 0;
-      } else {
-        return ord % indexInterval == 0;
-      }
-    }
-
-    @Override
-    public boolean nextIndexTerm(long ord, TermsIndexResult result) throws IOException {
-      if (coreIndex == null) {
-        throw new IllegalStateException("terms index was not loaded");
-      } else {
-        return coreIndex.nextIndexTerm(ord, result);
-      }
-    }
-
-    @Override
-    public void getIndexOffset(BytesRef term, TermsIndexResult result) throws IOException {
-      // You must call loadTermsIndex if you had specified -1 for indexDivisor
-      if (coreIndex == null) {
-        throw new IllegalStateException("terms index was not loaded");
-      }
-      coreIndex.getIndexOffset(term, result);
-    }
-
-    @Override
-    public void getIndexOffset(long ord, TermsIndexResult result) throws IOException {
-      // You must call loadTermsIndex if you had specified
-      // indexDivisor < 0 to ctor
-      if (coreIndex == null) {
-        throw new IllegalStateException("terms index was not loaded");
-      }
-      coreIndex.getIndexOffset(ord, result);
-    }
-
     private final class CoreFieldIndex {
 
-      final private long termBytesStart;
+      // where this field's terms begin in the packed byte[]
+      // data
+      final long termBytesStart;
 
       // offset into index termBytes
       final PackedInts.Reader termOffsets;
@@ -245,7 +269,6 @@ public class FixedGapTermsIndexReader ex
       final PackedInts.Reader termsDictOffsets;
 
       final int numIndexTerms;
-
       final long termsStart;
 
       public CoreFieldIndex(long indexStart, long termsStart, long packedIndexStart, long packedOffsetsStart, int numIndexTerms) throws IOException {
@@ -315,7 +338,6 @@ public class FixedGapTermsIndexReader ex
               termsDictOffsetsM.set(upto, termsDictOffsetsIter.next());
 
               termOffsetsM.set(upto, termOffsetUpto);
-              upto++;
 
               long termOffset = termOffsetsIter.next();
               long nextTermOffset = termOffsetsIter.next();
@@ -328,6 +350,11 @@ public class FixedGapTermsIndexReader ex
               termBytes.copy(clone, numTermBytes);
               termOffsetUpto += numTermBytes;
 
+              upto++;
+              if (upto == this.numIndexTerms) {
+                break;
+              }
+
               // skip terms:
               termsDictOffsetsIter.next();
               for(int i=0;i<indexDivisor-2;i++) {
@@ -344,71 +371,10 @@ public class FixedGapTermsIndexReader ex
           }
         }
       }
-
-      public boolean nextIndexTerm(long ord, TermsIndexResult result) throws IOException {
-        int idx = 1 + (int) (ord / totalIndexInterval);
-        if (idx < numIndexTerms) {
-          fillResult(idx, result);
-          return true;
-        } else {
-          return false;
-        }
-      }
-
-      private void fillResult(int idx, TermsIndexResult result) {
-        final long offset = termOffsets.get(idx);
-        final int length = (int) (termOffsets.get(1+idx) - offset);
-        termBytesReader.fillSlice(result.term, termBytesStart + offset, length);
-        result.position = idx * totalIndexInterval;
-        result.offset = termsStart + termsDictOffsets.get(idx);
-      }
-
-      public void getIndexOffset(BytesRef term, TermsIndexResult result) throws IOException {
-        int lo = 0;					  // binary search
-        int hi = numIndexTerms - 1;
-        assert totalIndexInterval > 0 : "totalIndexInterval=" + totalIndexInterval;
-
-        while (hi >= lo) {
-          int mid = (lo + hi) >>> 1;
-
-          final long offset = termOffsets.get(mid);
-          final int length = (int) (termOffsets.get(1+mid) - offset);
-          termBytesReader.fillSlice(result.term, termBytesStart + offset, length);
-
-          int delta = termComp.compare(term, result.term);
-          if (delta < 0) {
-            hi = mid - 1;
-          } else if (delta > 0) {
-            lo = mid + 1;
-          } else {
-            assert mid >= 0;
-            result.position = mid*totalIndexInterval;
-            result.offset = termsStart + termsDictOffsets.get(mid);
-            return;
-          }
-        }
-        if (hi < 0) {
-          assert hi == -1;
-          hi = 0;
-        }
-
-        final long offset = termOffsets.get(hi);
-        final int length = (int) (termOffsets.get(1+hi) - offset);
-        termBytesReader.fillSlice(result.term, termBytesStart + offset, length);
-
-        result.position = hi*totalIndexInterval;
-        result.offset = termsStart + termsDictOffsets.get(hi);
-      }
-
-      public void getIndexOffset(long ord, TermsIndexResult result) throws IOException {
-        int idx = (int) (ord / totalIndexInterval);
-        // caller must ensure ord is in bounds
-        assert idx < numIndexTerms;
-        fillResult(idx, result);
-      }
     }
   }
 
+  // Externally synced in IndexWriter
   @Override
   public void loadTermsIndex(int indexDivisor) throws IOException {
     if (!indexLoaded) {
@@ -420,7 +386,7 @@ public class FixedGapTermsIndexReader ex
       }
       this.totalIndexInterval = indexInterval * this.indexDivisor;
 
-      Iterator<FieldIndexReader> it = fields.values().iterator();
+      Iterator<FieldIndexData> it = fields.values().iterator();
       while(it.hasNext()) {
         it.next().loadTermsIndex();
       }
@@ -432,8 +398,13 @@ public class FixedGapTermsIndexReader ex
   }
 
   @Override
-  public FieldReader getField(FieldInfo fieldInfo) {
-    return fields.get(fieldInfo);
+  public FieldIndexEnum getFieldEnum(FieldInfo fieldInfo) {
+    final FieldIndexData fieldData = fields.get(fieldInfo);
+    if (fieldData.coreIndex == null) {
+      return null;
+    } else {
+      return new IndexEnum(fieldData.coreIndex);
+    }
   }
 
   public static void files(Directory dir, SegmentInfo info, String id, Collection<String> files) {

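The ord-based seek in IndexEnum above relies on the fixed gap: every
totalIndexInterval'th term is indexed, so mapping an ord to an index slot is
plain integer division. A minimal standalone sketch of that arithmetic, with
hypothetical interval values (nothing here is read from a real index file;
the class name is the sketch's own):

    public class FixedGapOrdMath {
      public static void main(String[] args) {
        final int indexInterval = 32;      // every 32nd term indexed at write time
        final int indexDivisor = 2;        // reader kept every 2nd index term
        final int totalIndexInterval = indexInterval * indexDivisor;  // 64

        final long ord = 200;              // absolute term ordinal we want
        final int idx = (int) (ord / totalIndexInterval);             // slot 3
        final long slotOrd = (long) idx * totalIndexInterval;         // ord 192

        // The indexed term at slot 3 is the last one <= ord 200; the terms
        // dict is then scanned 200 - 192 = 8 terms forward:
        System.out.println("slot=" + idx + " slotOrd=" + slotOrd
            + " scan=" + (ord - slotOrd));
      }
    }
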
Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexWriter.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexWriter.java Wed Jan  5 20:25:17 2011
@@ -31,7 +31,14 @@ import java.util.List;
 import java.util.ArrayList;
 import java.io.IOException;
 
-/** @lucene.experimental */
+/**
+ * Selects every Nth term as an index term, and holds term
+ * bytes fully expanded in memory.  This terms index
+ * supports seeking by ord.  See {@link
+ * VariableGapTermsIndexWriter} for a more memory-efficient
+ * terms index that does not support seeking by ord.
+ *
+ * @lucene.experimental */
 public class FixedGapTermsIndexWriter extends TermsIndexWriterBase {
   protected final IndexOutput out;
 
@@ -50,7 +57,6 @@ public class FixedGapTermsIndexWriter ex
 
   public FixedGapTermsIndexWriter(SegmentWriteState state) throws IOException {
     final String indexFileName = IndexFileNames.segmentFileName(state.segmentName, state.codecId, TERMS_INDEX_EXTENSION);
-    state.flushedFiles.add(indexFileName);
     termIndexInterval = state.termIndexInterval;
     out = state.directory.createOutput(indexFileName);
     fieldInfos = state.fieldInfos;
@@ -203,15 +209,25 @@ public class FixedGapTermsIndexWriter ex
     final long dirStart = out.getFilePointer();
     final int fieldCount = fields.size();
 
-    out.writeInt(fieldCount);
+    int nonNullFieldCount = 0;
     for(int i=0;i<fieldCount;i++) {
       SimpleFieldWriter field = fields.get(i);
-      out.writeInt(field.fieldInfo.number);
-      out.writeInt(field.numIndexTerms);
-      out.writeLong(field.termsStart);
-      out.writeLong(field.indexStart);
-      out.writeLong(field.packedIndexStart);
-      out.writeLong(field.packedOffsetsStart);
+      if (field.numIndexTerms > 0) {
+        nonNullFieldCount++;
+      }
+    }
+
+    out.writeVInt(nonNullFieldCount);
+    for(int i=0;i<fieldCount;i++) {
+      SimpleFieldWriter field = fields.get(i);
+      if (field.numIndexTerms > 0) {
+        out.writeVInt(field.fieldInfo.number);
+        out.writeVInt(field.numIndexTerms);
+        out.writeVLong(field.termsStart);
+        out.writeVLong(field.indexStart);
+        out.writeVLong(field.packedIndexStart);
+        out.writeVLong(field.packedOffsetsStart);
+      }
     }
     writeTrailer(dirStart);
     out.close();

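The directory rewrite above (writeInt/writeLong replaced by
writeVInt/writeVLong) pays off because field numbers, term counts and start
offsets are usually small. A standalone sketch of the variable-length rule
those calls follow -- 7 payload bits per byte, continuing while bits remain
(the helper below is the sketch's own, not Lucene's IndexOutput):

    public class VIntLengthSketch {
      // Number of bytes a value takes when encoded 7 bits at a time:
      static int vLongLength(long value) {
        int bytes = 1;
        while ((value & ~0x7FL) != 0) {   // more than 7 bits left?
          value >>>= 7;
          bytes++;
        }
        return bytes;
      }
      public static void main(String[] args) {
        System.out.println(vLongLength(5));         // 1 byte (4 as a fixed int)
        System.out.println(vLongLength(16384));     // 3 bytes
        System.out.println(vLongLength(1L << 40));  // 6 bytes (8 as a fixed long)
      }
    }
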
Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/PrefixCodedTermsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/PrefixCodedTermsReader.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/PrefixCodedTermsReader.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/PrefixCodedTermsReader.java Wed Jan  5 20:25:17 2011
@@ -141,12 +141,10 @@ public class PrefixCodedTermsReader exte
         final long numTerms = in.readLong();
         assert numTerms >= 0;
         final long termsStartPointer = in.readLong();
-        final TermsIndexReaderBase.FieldReader fieldIndexReader;
         final FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
-        fieldIndexReader = indexReader.getField(fieldInfo);
         if (numTerms > 0) {
           assert !fields.containsKey(fieldInfo.name);
-          fields.put(fieldInfo.name, new FieldReader(fieldIndexReader, fieldInfo, numTerms, termsStartPointer));
+          fields.put(fieldInfo.name, new FieldReader(fieldInfo, numTerms, termsStartPointer));
         }
       }
       success = true;
@@ -258,14 +256,12 @@ public class PrefixCodedTermsReader exte
     final long numTerms;
     final FieldInfo fieldInfo;
     final long termsStartPointer;
-    final TermsIndexReaderBase.FieldReader fieldIndexReader;
 
-    FieldReader(TermsIndexReaderBase.FieldReader fieldIndexReader, FieldInfo fieldInfo, long numTerms, long termsStartPointer) {
+    FieldReader(FieldInfo fieldInfo, long numTerms, long termsStartPointer) {
       assert numTerms > 0;
       this.fieldInfo = fieldInfo;
       this.numTerms = numTerms;
       this.termsStartPointer = termsStartPointer;
-      this.fieldIndexReader = fieldIndexReader;
     }
 
     @Override
@@ -288,18 +284,25 @@ public class PrefixCodedTermsReader exte
       return numTerms;
     }
 
-    // Iterates through terms in this field
+    // Iterates through terms in this field, not supporting ord()
     private class SegmentTermsEnum extends TermsEnum {
       private final IndexInput in;
       private final DeltaBytesReader bytesReader;
       private final TermState state;
       private boolean seekPending;
-      private final TermsIndexReaderBase.TermsIndexResult indexResult = new TermsIndexReaderBase.TermsIndexResult();
       private final FieldAndTerm fieldTerm = new FieldAndTerm();
+      private final TermsIndexReaderBase.FieldIndexEnum indexEnum;
+      private boolean positioned;
+      private boolean didIndexNext;
+      private BytesRef nextIndexTerm;
+      private boolean isIndexTerm;
+      private final boolean doOrd;
 
       SegmentTermsEnum() throws IOException {
         in = (IndexInput) PrefixCodedTermsReader.this.in.clone();
         in.seek(termsStartPointer);
+        indexEnum = indexReader.getFieldEnum(fieldInfo);
+        doOrd = indexReader.supportsOrd();
         bytesReader = new DeltaBytesReader(in);
         fieldTerm.field = fieldInfo.name;
         state = postingsReader.newTermState();
@@ -319,12 +322,41 @@ public class PrefixCodedTermsReader exte
                        stateCopy);
       }
 
+      // called only from assert
+      private boolean first;
+      private int indexTermCount;
+
+      private boolean startSeek() {
+        first = true;
+        indexTermCount = 0;
+        return true;
+      }
+
+      private boolean checkSeekScan() {
+        if (!first && isIndexTerm) {
+          indexTermCount++;
+          if (indexTermCount >= indexReader.getDivisor()) {
+            //System.out.println("now fail count=" + indexTermCount);
+            return false;
+          }
+        }
+        first = false;
+        return true;
+      }
+
       /** Seeks until the first term that's >= the provided
        *  text; returns SeekStatus.FOUND if the exact term
        *  is found, SeekStatus.NOT_FOUND if a different term
        *  was found, SeekStatus.END if we hit EOF */
       @Override
       public SeekStatus seek(BytesRef term, boolean useCache) throws IOException {
+
+        if (indexEnum == null) {
+          throw new IllegalStateException("terms index was not loaded");
+        }
+        
+        //System.out.println("te.seek term=" + fieldInfo.name + ":" + term.utf8ToString() + " current=" + term().utf8ToString() + " useCache=" + useCache + " this="  + this);
+
         // Check cache
         fieldTerm.term = term;
         TermState cachedState;
@@ -333,7 +365,9 @@ public class PrefixCodedTermsReader exte
           if (cachedState != null) {
             state.copy(cachedState);
             seekPending = true;
+            positioned = false;
             bytesReader.term.copy(term);
+            //System.out.println("  cached!");
             return SeekStatus.FOUND;
           }
         } else {
@@ -342,36 +376,54 @@ public class PrefixCodedTermsReader exte
 
         boolean doSeek = true;
 
-        if (state.ord != -1) {
-          // we are positioned
+        if (positioned) {
 
           final int cmp = termComp.compare(bytesReader.term, term);
 
           if (cmp == 0) {
             // already at the requested term
             return SeekStatus.FOUND;
-          }
+          } else if (cmp < 0) {
+
+            if (seekPending) {
+              seekPending = false;
+              in.seek(state.filePointer);
+              indexEnum.seek(bytesReader.term);
+              didIndexNext = false;
+            }
+
+            // Target term is after current term
+            if (!didIndexNext) {
+              if (indexEnum.next() == -1) {
+                nextIndexTerm = null;
+              } else {
+                nextIndexTerm = indexEnum.term();
+              }
+              //System.out.println("  now do index next() nextIndexTerm=" + (nextIndexTerm == null ? "null" : nextIndexTerm.utf8ToString()));
+              didIndexNext = true;
+            }
 
-          if (cmp < 0 &&
-              fieldIndexReader.nextIndexTerm(state.ord, indexResult) &&
-              termComp.compare(indexResult.term, term) > 0) {
-            // Optimization: requested term is within the
-            // same index block we are now in; skip seeking
-            // (but do scanning):
-            doSeek = false;
+            if (nextIndexTerm == null || termComp.compare(term, nextIndexTerm) < 0) {
+              // Optimization: requested term is within the
+              // same index block we are now in; skip seeking
+              // (but do scanning):
+              doSeek = false;
+              //System.out.println("  skip seek: nextIndexTerm=" + nextIndexTerm);
+            }
           }
         }
 
-        // Used only for assert:
-        final long startOrd;
-
         if (doSeek) {
 
-          // As index to find biggest index term that's <=
-          // our text:
-          fieldIndexReader.getIndexOffset(term, indexResult);
+          positioned = true;
 
-          in.seek(indexResult.offset);
+          // Ask terms index to find biggest index term that's <=
+          // our text:
+          in.seek(indexEnum.seek(term));
+          didIndexNext = false;
+          if (doOrd) {
+            state.ord = indexEnum.ord()-1;
+          }
           seekPending = false;
 
           // NOTE: the first next() after an index seek is
@@ -380,22 +432,20 @@ public class PrefixCodedTermsReader exte
           // those bytes in the primary file, but then when
           // scanning over an index term we'd have to
           // special case it:
-          bytesReader.reset(indexResult.term);
-          
-          state.ord = indexResult.position-1;
-          assert state.ord >= -1: "ord=" + state.ord + " pos=" + indexResult.position;
-
-          startOrd = indexResult.position;
+          bytesReader.reset(indexEnum.term());
+          //System.out.println("  doSeek term=" + indexEnum.term().utf8ToString() + " vs target=" + term.utf8ToString());
         } else {
-          startOrd = -1;
+          //System.out.println("  skip seek");
         }
 
+        assert startSeek();
+
         // Now scan:
-        while(next() != null) {
+        while (next() != null) {
           final int cmp = termComp.compare(bytesReader.term, term);
           if (cmp == 0) {
-
-            if (doSeek && useCache) {
+            // Done!
+            if (useCache) {
               // Store in cache
               FieldAndTerm entryKey = new FieldAndTerm(fieldTerm);
               cachedState = (TermState) state.clone();
@@ -403,94 +453,62 @@ public class PrefixCodedTermsReader exte
               cachedState.filePointer = in.getFilePointer();
               termsCache.put(entryKey, cachedState);
             }
-              
+
             return SeekStatus.FOUND;
           } else if (cmp > 0) {
             return SeekStatus.NOT_FOUND;
           }
+
           // The purpose of the terms dict index is to seek
           // the enum to the closest index term before the
           // term we are looking for.  So, we should never
           // cross another index term (besides the first
           // one) while we are scanning:
-          assert state.ord == startOrd || !fieldIndexReader.isIndexTerm(state.ord, state.docFreq, true): "state.ord=" + state.ord + " startOrd=" + startOrd + " ir.isIndexTerm=" + fieldIndexReader.isIndexTerm(state.ord, state.docFreq, true) + " state.docFreq=" + state.docFreq;
+          assert checkSeekScan();
         }
 
+        positioned = false;
         return SeekStatus.END;
       }
 
       @Override
-      public SeekStatus seek(long ord) throws IOException {
-
-        // TODO: should we cache term lookup by ord as well...?
-
-        if (ord >= numTerms) {
-          state.ord = numTerms-1;
-          return SeekStatus.END;
-        }
-
-        fieldIndexReader.getIndexOffset(ord, indexResult);
-        in.seek(indexResult.offset);
-        seekPending = false;
-
-        // NOTE: the first next() after an index seek is
-        // wasteful, since it redundantly reads the same
-        // bytes into the buffer
-        bytesReader.reset(indexResult.term);
-
-        state.ord = indexResult.position-1;
-        assert state.ord >= -1: "ord=" + state.ord;
-
-        // Now, scan:
-        int left = (int) (ord - state.ord);
-        while(left > 0) {
-          final BytesRef term = next();
-          assert term != null;
-          left--;
-        }
-
-        // always found
-        return SeekStatus.FOUND;
-      }
-
-      @Override
       public BytesRef term() {
         return bytesReader.term;
       }
 
       @Override
-      public long ord() {
-        return state.ord;
-      }
-
-      @Override
       public BytesRef next() throws IOException {
 
         if (seekPending) {
           seekPending = false;
           in.seek(state.filePointer);
+          indexEnum.seek(bytesReader.term);
+          didIndexNext = false;
         }
         
-        if (state.ord >= numTerms-1) {
+        if (!bytesReader.read()) {
+          //System.out.println("te.next end!");
+          positioned = false;
           return null;
         }
 
-        bytesReader.read();
-        state.docFreq = in.readVInt();
+        final byte b = in.readByte();
+        isIndexTerm = (b & 0x80) != 0;
+
+        if ((b & 0x40) == 0) {
+          // Fast case -- docFreq fits in 6 bits
+          state.docFreq = b & 0x3F;
+        } else {
+          state.docFreq = (in.readVInt() << 6) | (b & 0x3F);
+        }
 
-        // TODO: would be cleaner, but space-wasting, to
-        // simply record a bit into each index entry as to
-        // whether it's an index entry or not, rather than
-        // re-compute that information... or, possibly store
-        // a "how many terms until next index entry" in each
-        // index entry, but that'd require some tricky
-        // lookahead work when writing the index
         postingsReader.readTerm(in,
                                 fieldInfo, state,
-                                fieldIndexReader.isIndexTerm(1+state.ord, state.docFreq, false));
-
+                                isIndexTerm);
         state.ord++;
+        positioned = true;
 
+        //System.out.println("te.next term=" + bytesReader.term.utf8ToString());
         return bytesReader.term;
       }
 
@@ -514,6 +532,50 @@ public class PrefixCodedTermsReader exte
           return postingsReader.docsAndPositions(fieldInfo, state, skipDocs, reuse);
         }
       }
+
+      @Override
+      public SeekStatus seek(long ord) throws IOException {
+
+        if (indexEnum == null) {
+          throw new IllegalStateException("terms index was not loaded");
+        }
+
+        if (ord >= numTerms) {
+          state.ord = numTerms-1;
+          return SeekStatus.END;
+        }
+
+        in.seek(indexEnum.seek(ord));
+        seekPending = false;
+        positioned = true;
+
+        // NOTE: the first next() after an index seek is
+        // wasteful, since it redundantly reads the same
+        // bytes into the buffer
+        bytesReader.reset(indexEnum.term());
+
+        state.ord = indexEnum.ord()-1;
+        assert state.ord >= -1: "ord=" + state.ord;
+
+        // Now, scan:
+        int left = (int) (ord - state.ord);
+        while(left > 0) {
+          final BytesRef term = next();
+          assert term != null;
+          left--;
+        }
+
+        // always found
+        return SeekStatus.FOUND;
+      }
+
+      @Override
+      public long ord() {
+        if (!doOrd) {
+          throw new UnsupportedOperationException();
+        }
+        return state.ord;
+      }
     }
   }
 }

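The doSeek bookkeeping above carries one small but important optimization: if
the enum is already positioned before the target term, and the target still
sorts before the next index term, then the current index block already
contains it, so the index seek is skipped and only the forward scan runs. A
standalone sketch of that test, simplified to String comparison with made-up
terms (class and method names are the sketch's own):

    public class SkipSeekSketch {
      // Mirrors the nextIndexTerm check in SegmentTermsEnum.seek:
      static boolean canSkipSeek(String current, String target, String nextIndexTerm) {
        return current.compareTo(target) < 0
            && (nextIndexTerm == null || target.compareTo(nextIndexTerm) < 0);
      }
      public static void main(String[] args) {
        // Positioned on "dog"; the next index term is "gnu":
        System.out.println(canSkipSeek("dog", "eel", "gnu"));  // true: just scan
        System.out.println(canSkipSeek("dog", "hen", "gnu"));  // false: re-seek index
        System.out.println(canSkipSeek("dog", "eel", null));   // true: past last index term
      }
    }
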
Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/PrefixCodedTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/PrefixCodedTermsWriter.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/PrefixCodedTermsWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/PrefixCodedTermsWriter.java Wed Jan  5 20:25:17 2011
@@ -74,7 +74,6 @@ public class PrefixCodedTermsWriter exte
     this.termComp = termComp;
     out = state.directory.createOutput(termsFileName);
     termsIndexWriter.setTermsOutput(out);
-    state.flushedFiles.add(termsFileName);
 
     fieldInfos = state.fieldInfos;
     writeHeader(out);
@@ -93,7 +92,7 @@ public class PrefixCodedTermsWriter exte
   }
 
   @Override
-  public TermsConsumer addField(FieldInfo field) {
+  public TermsConsumer addField(FieldInfo field) throws IOException {
     assert currentField == null || currentField.name.compareTo(field.name) < 0 : "current field name " + (currentField == null? null: currentField.name) + " given: " +field.name;
     currentField = field;
     TermsIndexWriterBase.FieldWriter fieldIndexWriter = termsIndexWriter.addField(field);
@@ -173,12 +172,25 @@ public class PrefixCodedTermsWriter exte
     public void finishTerm(BytesRef text, int numDocs) throws IOException {
 
       assert numDocs > 0;
+      //System.out.println("finishTerm term=" + fieldInfo.name + ":" + text.utf8ToString() + " fp="  + out.getFilePointer());
 
       final boolean isIndexTerm = fieldIndexWriter.checkIndexTerm(text, numDocs);
 
       termWriter.write(text);
-      out.writeVInt(numDocs);
+      final int highBit = isIndexTerm ? 0x80 : 0;
+      //System.out.println("  isIndex=" + isIndexTerm);
 
+      // This is a vInt, except we steal the top bit to record
+      // whether this was an indexed term:
+      if ((numDocs & ~0x3F) == 0) {
+        // Fast case -- docFreq fits in 6 bits
+        out.writeByte((byte) (highBit | numDocs));
+      } else {
+        // Write bottom 6 bits of docFreq, then write the
+        // remainder as vInt:
+        out.writeByte((byte) (highBit | 0x40 | (numDocs & 0x3F)));
+        out.writeVInt(numDocs >>> 6);
+      }
       postingsWriter.finishTerm(numDocs, isIndexTerm);
       numTerms++;
     }
@@ -186,6 +198,8 @@ public class PrefixCodedTermsWriter exte
     // Finishes all terms in this field
     @Override
     public void finish() throws IOException {
+      // EOF marker:
+      out.writeVInt(DeltaBytesWriter.TERM_EOF);
       fieldIndexWriter.finish();
     }
   }

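The finishTerm change above and the matching decode in
SegmentTermsEnum.next() share one byte layout: bit 7 flags an index term,
bit 6 flags that more vInt bytes follow, bits 0-5 carry the low docFreq
bits. A self-contained round trip over plain ints, standing in for Lucene's
IndexOutput/IndexInput (the class name is the sketch's own):

    public class DocFreqByteRoundTrip {
      public static void main(String[] args) {
        int[] freqs = {1, 63, 64, 100000};
        for (int docFreq : freqs) {
          for (boolean isIndexTerm : new boolean[] {false, true}) {
            // Encode, as in PrefixCodedTermsWriter.finishTerm:
            final int highBit = isIndexTerm ? 0x80 : 0;
            final int b;
            final int rest;   // remainder that would go into writeVInt, or -1
            if ((docFreq & ~0x3F) == 0) {
              b = highBit | docFreq;                  // fast case: fits in 6 bits
              rest = -1;
            } else {
              b = highBit | 0x40 | (docFreq & 0x3F);
              rest = docFreq >>> 6;
            }
            // Decode, as in SegmentTermsEnum.next:
            final boolean gotIndexTerm = (b & 0x80) != 0;
            final int gotFreq = (b & 0x40) == 0 ? (b & 0x3F) : (rest << 6) | (b & 0x3F);
            if (gotIndexTerm != isIndexTerm || gotFreq != docFreq) {
              throw new AssertionError("mismatch at docFreq=" + docFreq);
            }
          }
        }
        System.out.println("round trip ok");
      }
    }
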
Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java Wed Jan  5 20:25:17 2011
@@ -21,6 +21,7 @@ import org.apache.lucene.index.FieldInfo
 import org.apache.lucene.util.BytesRef;
 
 import java.io.IOException;
+import java.io.Closeable;
 import java.util.Collection;
 
 
@@ -38,39 +39,37 @@ import java.util.Collection;
  * text. 
  * @lucene.experimental */
 
-public abstract class TermsIndexReaderBase {
+public abstract class TermsIndexReaderBase implements Closeable {
 
-  static class TermsIndexResult {
-    long position;
-    final BytesRef term = new BytesRef();
-    long offset;
-  };
-
-  public abstract class FieldReader {
-    /** Returns position of "largest" index term that's <=
-     *  text.  Returned TermsIndexResult may be reused
-     *  across calls.  This resets internal state, and
-     *  expects that you'll then scan the file and
-     *  sequentially call isIndexTerm for each term
-     *  encountered. */
-    public abstract void getIndexOffset(BytesRef term, TermsIndexResult result) throws IOException;
-
-    public abstract void getIndexOffset(long ord, TermsIndexResult result) throws IOException;
-
-    /** Call this sequentially for each term encoutered,
-     *  after calling {@link #getIndexOffset}. */
-    public abstract boolean isIndexTerm(long ord, int docFreq, boolean onlyLoaded) throws IOException;
-
-    /** Finds the next index term, after the specified
-     *  ord.  Returns true if one exists.  */
-    public abstract boolean nextIndexTerm(long ord, TermsIndexResult result) throws IOException;
-  }
-
-  public abstract FieldReader getField(FieldInfo fieldInfo);
+  public abstract FieldIndexEnum getFieldEnum(FieldInfo fieldInfo);
 
   public abstract void loadTermsIndex(int indexDivisor) throws IOException;
 
   public abstract void close() throws IOException;
 
   public abstract void getExtensions(Collection<String> extensions);
-}
\ No newline at end of file
+
+  public abstract boolean supportsOrd();
+
+  public abstract int getDivisor();
+
+  // Similar to TermsEnum, except the only "metadata" it
+  // reports for a given indexed term is the long fileOffset
+  // into the main terms dict (_X.tis) file:
+  public static abstract class FieldIndexEnum {
+
+    /** Seeks to "largest" indexed term that's <=
+     *  term; returns the file pointer (into the main
+     *  terms dict file) for that term */
+    public abstract long seek(BytesRef term) throws IOException;
+
+    /** Returns -1 at end */
+    public abstract long next() throws IOException;
+
+    public abstract BytesRef term();
+
+    // Only impl'd if supportsOrd() returns true!
+    public abstract long seek(long ord) throws IOException;
+    public abstract long ord();
+  }
+}

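To make the new FieldIndexEnum contract concrete, here is a toy in-memory
implementation: seek(term) binary-searches for the largest indexed term <=
the target and returns its file pointer, next() walks forward, and ord()
reports the absolute term ordinal. All data is hypothetical; real
implementations read these arrays from the terms index file.

    import java.util.Arrays;

    public class ToyFieldIndexEnum {
      final String[] indexTerms = {"ant", "dog", "gnu"}; // every interval'th term
      final long[] filePointers = {0L, 120L, 260L};      // offsets into the terms dict
      final int interval = 3;
      int slot = -1;

      long seek(String target) {    // largest indexed term <= target
        int i = Arrays.binarySearch(indexTerms, target);
        slot = i >= 0 ? i : Math.max(0, -i - 2);
        return filePointers[slot];
      }
      long next() {                 // -1 at end
        if (slot + 1 == indexTerms.length) return -1;
        return filePointers[++slot];
      }
      String term() { return indexTerms[slot]; }
      long ord() { return (long) slot * interval; }

      public static void main(String[] args) {
        ToyFieldIndexEnum e = new ToyFieldIndexEnum();
        System.out.println(e.seek("eel") + " " + e.term() + " ord=" + e.ord()); // 120 dog ord=3
        System.out.println(e.next() + " " + e.term());                          // 260 gnu
        System.out.println(e.next());                                           // -1
      }
    }
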
Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexWriterBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexWriterBase.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexWriterBase.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexWriterBase.java Wed Jan  5 20:25:17 2011
@@ -32,7 +32,7 @@ public abstract class TermsIndexWriterBa
     public abstract void finish() throws IOException;
   }
 
-  public abstract FieldWriter addField(FieldInfo fieldInfo);
+  public abstract FieldWriter addField(FieldInfo fieldInfo) throws IOException;
 
   public abstract void close() throws IOException;
-}
\ No newline at end of file
+}

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java Wed Jan  5 20:25:17 2011
@@ -228,8 +228,11 @@ public final class PulsingPostingsWriter
   }
 
   @Override
-  public void finishDoc() {
+  public void finishDoc() throws IOException {
     assert omitTF || currentDoc.numPositions == currentDoc.termDocFreq;
+    if (pulsed) {
+      wrappedPostingsWriter.finishDoc();
+    }
   }
 
   boolean pendingIsIndexTerm;

Modified: lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java?rev=1055622&r1=1055621&r2=1055622&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java Wed Jan  5 20:25:17 2011
@@ -594,6 +594,7 @@ public class SepPostingsReaderImpl exten
       }
 
       final int code = posReader.next();
+      assert code >= 0;
       if (storePayloads) {
         if ((code & 1) != 0) {
           // Payload length has changed