Posted to commits@lucene.apache.org by rm...@apache.org on 2011/11/07 21:20:00 UTC

svn commit: r1198916 - in /lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index: MergeState.java SegmentMerger.java SegmentReader.java codecs/DefaultFieldsWriter.java codecs/FieldsReader.java codecs/FieldsWriter.java

Author: rmuir
Date: Mon Nov  7 20:19:59 2011
New Revision: 1198916

URL: http://svn.apache.org/viewvc?rev=1198916&view=rev
Log:
LUCENE-2621: move stored fields merging under codec control
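
In effect, SegmentMerger no longer copies stored fields itself: the abstract
FieldsWriter grows a merge(MergeState) method with a generic
document-at-a-time default, and DefaultFieldsWriter overrides it to keep the
raw bulk-copy fast path. A minimal, self-contained sketch of that pattern
(hypothetical names; the real signatures appear in the diffs below):

    import java.io.IOException;
    import java.util.List;

    // Hypothetical analogue of the new extension point: the codec's
    // writer owns merging, so alternate formats can override it.
    abstract class SketchFieldsWriter {
      abstract void addDocument(String doc) throws IOException;

      // Generic default: re-add every document through the public API.
      int merge(List<List<String>> segments) throws IOException {
        int docCount = 0;
        for (List<String> segment : segments) {
          for (String doc : segment) {
            addDocument(doc);
            docCount++;
          }
        }
        return docCount;
      }
    }

    final class SketchDefaultFieldsWriter extends SketchFieldsWriter {
      @Override
      void addDocument(String doc) { /* encode one document */ }

      // Fast path: a format that recognizes its own on-disk layout can
      // copy raw bytes instead of decoding and re-encoding each doc.
      @Override
      int merge(List<List<String>> segments) throws IOException {
        int docCount = 0;
        for (List<String> segment : segments) {
          docCount += segment.size(); // stands in for a raw byte copy
        }
        return docCount;
      }
    }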

Modified:
    lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/MergeState.java
    lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
    lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentReader.java
    lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/DefaultFieldsWriter.java
    lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsReader.java
    lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsWriter.java

Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/MergeState.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/MergeState.java?rev=1198916&r1=1198915&r2=1198916&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/MergeState.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/MergeState.java Mon Nov  7 20:19:59 2011
@@ -58,6 +58,11 @@ public class MergeState {
   public DirPayloadProcessor[] dirPayloadProcessor;
   public PayloadProcessor[] currentPayloadProcessor;
 
+  // TODO: get rid of this? it tells you which segments are 'aligned' (e.g. for bulk merging)
+  // but is this really so expensive to compute again in different components, versus once in SM?
+  public SegmentReader[] matchingSegmentReaders;
+  public int matchedCount;
+  
   public static class CheckAbort {
     private double workCount;
     private MergePolicy.OneMerge merge;
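
The TODO above concerns the "alignment" test itself: a reader qualifies for
bulk merging only if its field-number -> name mapping is identical to the
merged segment's. A hedged, self-contained sketch of that congruence check
(hypothetical class; the real test lives in setMatchingSegmentReaders in
SegmentMerger.java below):

    import java.util.Map;

    // Hypothetical sketch: two segments are "aligned" when every field
    // number in the reader maps to the same field name in the merged
    // segment, so raw stored-fields bytes can be copied unchanged.
    final class AlignmentCheck {
      static boolean congruent(Map<Integer, String> mergedFields,
                               Map<Integer, String> readerFields) {
        for (Map.Entry<Integer, String> e : readerFields.entrySet()) {
          if (!e.getValue().equals(mergedFields.get(e.getKey()))) {
            return false;
          }
        }
        return true;
      }
    }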

Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentMerger.java?rev=1198916&r1=1198915&r2=1198916&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentMerger.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentMerger.java Mon Nov  7 20:19:59 2011
@@ -23,19 +23,16 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.IndexReader.FieldOption;
 import org.apache.lucene.index.MergePolicy.MergeAbortedException;
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.index.codecs.FieldsConsumer;
-import org.apache.lucene.index.codecs.FieldsReader;
 import org.apache.lucene.index.codecs.FieldsWriter;
 import org.apache.lucene.index.codecs.PerDocConsumer;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.IOUtils;
@@ -56,7 +53,7 @@ final class SegmentMerger {
   private final int termIndexInterval;
 
   /** Maximum number of contiguous documents to bulk-copy
-      when merging stored fields */
+      when merging term vectors */
   private final static int MAX_RAW_MERGE_DOCS = 4192;
 
   private final Codec codec;
@@ -178,17 +175,12 @@ final class SegmentMerger {
     }
   }
 
-  private SegmentReader[] matchingSegmentReaders;
-  private int[] rawDocLengths;
-  private int[] rawDocLengths2;
-  private int matchedCount;
-
   private void setMatchingSegmentReaders() {
     // If the i'th reader is a SegmentReader and has
     // identical fieldName -> number mapping, then this
     // array will be non-null at position i:
     int numReaders = mergeState.readers.size();
-    matchingSegmentReaders = new SegmentReader[numReaders];
+    mergeState.matchingSegmentReaders = new SegmentReader[numReaders];
 
     // If this reader is a SegmentReader, and all of its
     // field name -> number mappings match the "merged"
@@ -204,19 +196,16 @@ final class SegmentMerger {
           same = mergeState.fieldInfos.fieldName(fi.number).equals(fi.name);
         }
         if (same) {
-          matchingSegmentReaders[i] = segmentReader;
-          matchedCount++;
+          mergeState.matchingSegmentReaders[i] = segmentReader;
+          mergeState.matchedCount++;
         }
       }
     }
 
-    // Used for bulk-reading raw bytes for stored fields
-    rawDocLengths = new int[MAX_RAW_MERGE_DOCS];
-    rawDocLengths2 = new int[MAX_RAW_MERGE_DOCS];
     if (mergeState.infoStream != null) {
-      mergeState.infoStream.message("SM", "merge store matchedCount=" + matchedCount + " vs " + mergeState.readers.size());
-      if (matchedCount != mergeState.readers.size()) {
-        mergeState.infoStream.message("SM", "" + (mergeState.readers.size() - matchedCount) + " non-bulk merges");
+      mergeState.infoStream.message("SM", "merge store matchedCount=" + mergeState.matchedCount + " vs " + mergeState.readers.size());
+      if (mergeState.matchedCount != mergeState.readers.size()) {
+        mergeState.infoStream.message("SM", "" + (mergeState.readers.size() - mergeState.matchedCount) + " non-bulk merges");
       }
     }
   }
@@ -256,24 +245,7 @@ final class SegmentMerger {
 
     final FieldsWriter fieldsWriter = codec.fieldsFormat().fieldsWriter(directory, segment, context);
     try {
-      int idx = 0;
-      for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
-        final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
-        FieldsReader matchingFieldsReader = null;
-        if (matchingSegmentReader != null) {
-          final FieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
-          if (fieldsReader != null) {
-            matchingFieldsReader = fieldsReader;
-          }
-        }
-        if (reader.liveDocs != null) {
-          docCount += copyFieldsWithDeletions(fieldsWriter,
-                                              reader, matchingFieldsReader);
-        } else {
-          docCount += copyFieldsNoDeletions(fieldsWriter,
-                                            reader, matchingFieldsReader);
-        }
-      }
+      docCount = fieldsWriter.merge(mergeState);
       fieldsWriter.finish(docCount);
     } finally {
       fieldsWriter.close();
@@ -282,97 +254,19 @@ final class SegmentMerger {
     return docCount;
   }
 
-  private int copyFieldsWithDeletions(final FieldsWriter fieldsWriter, final MergeState.IndexReaderAndLiveDocs reader,
-                                      final FieldsReader matchingFieldsReader)
-    throws IOException, MergeAbortedException, CorruptIndexException {
-    int docCount = 0;
-    final int maxDoc = reader.reader.maxDoc();
-    final Bits liveDocs = reader.liveDocs;
-    assert liveDocs != null;
-    if (matchingFieldsReader != null) {
-      // We can bulk-copy because the fieldInfos are "congruent"
-      for (int j = 0; j < maxDoc;) {
-        if (!liveDocs.get(j)) {
-          // skip deleted docs
-          ++j;
-          continue;
-        }
-        // We can optimize this case (doing a bulk byte copy) since the field
-        // numbers are identical
-        int start = j, numDocs = 0;
-        do {
-          j++;
-          numDocs++;
-          if (j >= maxDoc) break;
-          if (!liveDocs.get(j)) {
-            j++;
-            break;
-          }
-        } while(numDocs < MAX_RAW_MERGE_DOCS);
-
-        IndexInput stream = matchingFieldsReader.rawDocs(rawDocLengths, start, numDocs);
-        fieldsWriter.addRawDocuments(stream, rawDocLengths, numDocs);
-        docCount += numDocs;
-        mergeState.checkAbort.work(300 * numDocs);
-      }
-    } else {
-      for (int j = 0; j < maxDoc; j++) {
-        if (!liveDocs.get(j)) {
-          // skip deleted docs
-          continue;
-        }
-        // TODO: this could be more efficient using
-        // FieldVisitor instead of loading/writing entire
-        // doc; ie we just have to renumber the field number
-        // on the fly?
-        // NOTE: it's very important to first assign to doc then pass it to
-        // fieldsWriter.addDocument; see LUCENE-1282
-        Document doc = reader.reader.document(j);
-        fieldsWriter.addDocument(doc, mergeState.fieldInfos);
-        docCount++;
-        mergeState.checkAbort.work(300);
-      }
-    }
-    return docCount;
-  }
-
-  private int copyFieldsNoDeletions(final FieldsWriter fieldsWriter, final MergeState.IndexReaderAndLiveDocs reader,
-                                    final FieldsReader matchingFieldsReader)
-    throws IOException, MergeAbortedException, CorruptIndexException {
-    final int maxDoc = reader.reader.maxDoc();
-    int docCount = 0;
-    if (matchingFieldsReader != null) {
-      // We can bulk-copy because the fieldInfos are "congruent"
-      while (docCount < maxDoc) {
-        int len = Math.min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
-        IndexInput stream = matchingFieldsReader.rawDocs(rawDocLengths, docCount, len);
-        fieldsWriter.addRawDocuments(stream, rawDocLengths, len);
-        docCount += len;
-        mergeState.checkAbort.work(300 * len);
-      }
-    } else {
-      for (; docCount < maxDoc; docCount++) {
-        // NOTE: it's very important to first assign to doc then pass it to
-        // fieldsWriter.addDocument; see LUCENE-1282
-        Document doc = reader.reader.document(docCount);
-        fieldsWriter.addDocument(doc, mergeState.fieldInfos);
-        mergeState.checkAbort.work(300);
-      }
-    }
-    return docCount;
-  }
-
   /**
    * Merge the TermVectors from each of the segments into the new one.
    * @throws IOException
    */
   private final void mergeVectors(SegmentWriteState segmentWriteState) throws IOException {
     TermVectorsWriter termVectorsWriter = new TermVectorsWriter(directory, segment, mergeState.fieldInfos, context);
-
+    // Used for bulk-reading raw bytes for term vectors
+    int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
+    int rawDocLengths2[] = new int[MAX_RAW_MERGE_DOCS];
     try {
       int idx = 0;
       for (final MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
-        final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
+        final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
         TermVectorsReader matchingVectorsReader = null;
         if (matchingSegmentReader != null) {
           TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
@@ -383,9 +277,9 @@ final class SegmentMerger {
           }
         }
         if (reader.liveDocs != null) {
-          copyVectorsWithDeletions(termVectorsWriter, matchingVectorsReader, reader);
+          copyVectorsWithDeletions(termVectorsWriter, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
         } else {
-          copyVectorsNoDeletions(termVectorsWriter, matchingVectorsReader, reader);
+          copyVectorsNoDeletions(termVectorsWriter, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
         }
       }
     } finally {
@@ -407,7 +301,9 @@ final class SegmentMerger {
 
   private void copyVectorsWithDeletions(final TermVectorsWriter termVectorsWriter,
                                         final TermVectorsReader matchingVectorsReader,
-                                        final MergeState.IndexReaderAndLiveDocs reader)
+                                        final MergeState.IndexReaderAndLiveDocs reader,
+                                        int rawDocLengths[],
+                                        int rawDocLengths2[])
     throws IOException, MergeAbortedException {
     final int maxDoc = reader.reader.maxDoc();
     final Bits liveDocs = reader.liveDocs;
@@ -454,7 +350,9 @@ final class SegmentMerger {
 
   private void copyVectorsNoDeletions(final TermVectorsWriter termVectorsWriter,
                                       final TermVectorsReader matchingVectorsReader,
-                                      final MergeState.IndexReaderAndLiveDocs reader)
+                                      final MergeState.IndexReaderAndLiveDocs reader,
+                                      int rawDocLengths[],
+                                      int rawDocLengths2[])
       throws IOException, MergeAbortedException {
     final int maxDoc = reader.reader.maxDoc();
     if (matchingVectorsReader != null) {

Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentReader.java?rev=1198916&r1=1198915&r2=1198916&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentReader.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/SegmentReader.java Mon Nov  7 20:19:59 2011
@@ -368,7 +368,8 @@ public class SegmentReader extends Index
     hasChanges = false;
   }
 
-  FieldsReader getFieldsReader() {
+  /** @lucene.internal */
+  public FieldsReader getFieldsReader() {
     return fieldsReaderLocal.get();
   }
 

Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/DefaultFieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/DefaultFieldsWriter.java?rev=1198916&r1=1198915&r2=1198916&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/DefaultFieldsWriter.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/DefaultFieldsWriter.java Mon Nov  7 20:19:59 2011
@@ -18,13 +18,19 @@ package org.apache.lucene.index.codecs;
 
 import java.io.IOException;
 
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.MergeState;
+import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.index.MergePolicy.MergeAbortedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
@@ -250,4 +256,118 @@ public final class DefaultFieldsWriter e
       // details.
       throw new RuntimeException("mergeFields produced an invalid result: docCount is " + numDocs + " but fdx file size is " + indexStream.getFilePointer() + " file=" + indexStream.toString() + "; now aborting this merge to prevent index corruption");
   }
+  
+  @Override
+  public int merge(MergeState mergeState) throws IOException {
+    int docCount = 0;
+    // Used for bulk-reading raw bytes for stored fields
+    int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
+    int idx = 0;
+    
+    for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
+      final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
+      DefaultFieldsReader matchingFieldsReader = null;
+      if (matchingSegmentReader != null) {
+        final FieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
+        // we can only bulk-copy if the matching reader is also a DefaultFieldsReader
+        if (fieldsReader != null && fieldsReader instanceof DefaultFieldsReader) {
+          matchingFieldsReader = (DefaultFieldsReader) fieldsReader;
+        }
+      }
+    
+      if (reader.liveDocs != null) {
+        docCount += copyFieldsWithDeletions(mergeState,
+                                            reader, matchingFieldsReader, rawDocLengths);
+      } else {
+        docCount += copyFieldsNoDeletions(mergeState,
+                                          reader, matchingFieldsReader, rawDocLengths);
+      }
+    }
+
+    return docCount;
+  }
+
+  /** Maximum number of contiguous documents to bulk-copy
+      when merging stored fields */
+  private final static int MAX_RAW_MERGE_DOCS = 4192;
+
+  private int copyFieldsWithDeletions(MergeState mergeState, final MergeState.IndexReaderAndLiveDocs reader,
+                                      final DefaultFieldsReader matchingFieldsReader, int rawDocLengths[])
+    throws IOException, MergeAbortedException, CorruptIndexException {
+    int docCount = 0;
+    final int maxDoc = reader.reader.maxDoc();
+    final Bits liveDocs = reader.liveDocs;
+    assert liveDocs != null;
+    if (matchingFieldsReader != null) {
+      // We can bulk-copy because the fieldInfos are "congruent"
+      for (int j = 0; j < maxDoc;) {
+        if (!liveDocs.get(j)) {
+          // skip deleted docs
+          ++j;
+          continue;
+        }
+        // We can optimize this case (doing a bulk byte copy) since the field
+        // numbers are identical
+        int start = j, numDocs = 0;
+        do {
+          j++;
+          numDocs++;
+          if (j >= maxDoc) break;
+          if (!liveDocs.get(j)) {
+            j++;
+            break;
+          }
+        } while(numDocs < MAX_RAW_MERGE_DOCS);
+
+        IndexInput stream = matchingFieldsReader.rawDocs(rawDocLengths, start, numDocs);
+        addRawDocuments(stream, rawDocLengths, numDocs);
+        docCount += numDocs;
+        mergeState.checkAbort.work(300 * numDocs);
+      }
+    } else {
+      for (int j = 0; j < maxDoc; j++) {
+        if (!liveDocs.get(j)) {
+          // skip deleted docs
+          continue;
+        }
+        // TODO: this could be more efficient using
+        // FieldVisitor instead of loading/writing entire
+        // doc; ie we just have to renumber the field number
+        // on the fly?
+        // NOTE: it's very important to first assign to doc then pass it to
+        // fieldsWriter.addDocument; see LUCENE-1282
+        Document doc = reader.reader.document(j);
+        addDocument(doc, mergeState.fieldInfos);
+        docCount++;
+        mergeState.checkAbort.work(300);
+      }
+    }
+    return docCount;
+  }
+
+  private int copyFieldsNoDeletions(MergeState mergeState, final MergeState.IndexReaderAndLiveDocs reader,
+                                    final DefaultFieldsReader matchingFieldsReader, int rawDocLengths[])
+    throws IOException, MergeAbortedException, CorruptIndexException {
+    final int maxDoc = reader.reader.maxDoc();
+    int docCount = 0;
+    if (matchingFieldsReader != null) {
+      // We can bulk-copy because the fieldInfos are "congruent"
+      while (docCount < maxDoc) {
+        int len = Math.min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
+        IndexInput stream = matchingFieldsReader.rawDocs(rawDocLengths, docCount, len);
+        addRawDocuments(stream, rawDocLengths, len);
+        docCount += len;
+        mergeState.checkAbort.work(300 * len);
+      }
+    } else {
+      for (; docCount < maxDoc; docCount++) {
+        // NOTE: it's very important to first assign to doc then pass it to
+        // fieldsWriter.addDocument; see LUCENE-1282
+        Document doc = reader.reader.document(docCount);
+        addDocument(doc, mergeState.fieldInfos);
+        mergeState.checkAbort.work(300);
+      }
+    }
+    return docCount;
+  }
 }

Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsReader.java?rev=1198916&r1=1198915&r2=1198916&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsReader.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsReader.java Mon Nov  7 20:19:59 2011
@@ -5,7 +5,6 @@ import java.io.IOException;
 
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.StoredFieldVisitor;
-import org.apache.lucene.store.IndexInput;
 
 /**
  * Copyright 2004 The Apache Software Foundation
@@ -27,12 +26,6 @@ public abstract class FieldsReader imple
   
   public abstract void visitDocument(int n, StoredFieldVisitor visitor) throws CorruptIndexException, IOException;
   
-  /** Returns the length in bytes of each raw document in a
-   *  contiguous range of length numDocs starting with
-   *  startDocID.  Returns the IndexInput (the fieldStream),
-   *  already seeked to the starting point for startDocID.*/
-  public abstract IndexInput rawDocs(int[] lengths, int startDocID, int numDocs) throws IOException;
-
   public abstract int size();
 
   public abstract FieldsReader clone();

Modified: lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsWriter.java?rev=1198916&r1=1198915&r2=1198916&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsWriter.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/java/org/apache/lucene/index/codecs/FieldsWriter.java Mon Nov  7 20:19:59 2011
@@ -7,7 +7,6 @@ import org.apache.lucene.document.Docume
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MergeState;
-import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Bits;
 
 /**
@@ -30,13 +29,6 @@ public abstract class FieldsWriter imple
 
   public abstract void addDocument(Iterable<? extends IndexableField> doc, FieldInfos fieldInfos) throws IOException;
   
-  /** Bulk write a contiguous series of documents.  The
-   *  lengths array is the length (in bytes) of each raw
-   *  document.  The stream IndexInput is the
-   *  fieldsStream from which we should bulk-copy all
-   *  bytes. */
-  public abstract void addRawDocuments(IndexInput stream, int[] lengths, int numDocs) throws IOException;
-  
   public abstract void startDocument(int numStoredFields) throws IOException;
   
   public abstract void skipDocument() throws IOException;
@@ -52,34 +44,22 @@ public abstract class FieldsWriter imple
     int docCount = 0;
     for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
       final int maxDoc = reader.reader.maxDoc();
-      if (reader.liveDocs != null) {
-        final Bits liveDocs = reader.liveDocs;
-        assert liveDocs != null;
-        for (int i = 0; i < maxDoc; i++) {
-          if (!liveDocs.get(i)) {
-            // skip deleted docs
-            continue;
-          }
-          // TODO: this could be more efficient using
-          // FieldVisitor instead of loading/writing entire
-          // doc; ie we just have to renumber the field number
-          // on the fly?
-          // NOTE: it's very important to first assign to doc then pass it to
-          // fieldsWriter.addDocument; see LUCENE-1282
-          Document doc = reader.reader.document(i);
-          addDocument(doc, mergeState.fieldInfos);
-          docCount++;
-          mergeState.checkAbort.work(300);
-        }
-      } else {
-        for (int i = 0; i < maxDoc; i++) {
-          // NOTE: it's very important to first assign to doc then pass it to
-          // fieldsWriter.addDocument; see LUCENE-1282
-          Document doc = reader.reader.document(docCount);
-          addDocument(doc, mergeState.fieldInfos);
-          docCount++;
-          mergeState.checkAbort.work(300);
+      final Bits liveDocs = reader.liveDocs;
+      for (int i = 0; i < maxDoc; i++) {
+        if (liveDocs != null && !liveDocs.get(i)) {
+          // skip deleted docs
+          continue;
         }
+        // TODO: this could be more efficient using
+        // FieldVisitor instead of loading/writing entire
+        // doc; ie we just have to renumber the field number
+        // on the fly?
+        // NOTE: it's very important to first assign to doc then pass it to
+        // fieldsWriter.addDocument; see LUCENE-1282
+        Document doc = reader.reader.document(i);
+        addDocument(doc, mergeState.fieldInfos);
+        docCount++;
+        mergeState.checkAbort.work(300);
       }
     }
     return docCount;
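
The rewritten default merge collapses the deleted and non-deleted branches
into one loop with a null-safe liveDocs guard. Note that the old
no-deletions branch read reader.reader.document(docCount), where docCount
accumulates across readers, so it would fetch the wrong documents once a
previous reader had contributed docs; the unified loop indexes by the
reader-local i. A self-contained sketch of the resulting control flow
(hypothetical types):

    // Hypothetical sketch of the unified default merge: one pass per
    // reader, indexed by the reader-local doc id i, with deletions
    // handled by a null-safe guard instead of a separate branch.
    final class DefaultMergeSketch {
      static int merge(String[][] readers, boolean[][] liveDocs) {
        int docCount = 0;
        for (int r = 0; r < readers.length; r++) {
          for (int i = 0; i < readers[r].length; i++) {
            if (liveDocs[r] != null && !liveDocs[r][i]) {
              continue; // skip deleted docs
            }
            // the real code calls addDocument(doc, fieldInfos) here
            docCount++;
          }
        }
        return docCount;
      }
    }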