You are viewing a plain text version of this content. The canonical (HTML) version, including the original hyperlink, is available in the Apache mailing-list archive.
Posted to commits@lucene.apache.org by jp...@apache.org on 2022/05/18 21:06:31 UTC

[lucene] branch branch_9x updated: LUCENE-10574: Prevent pathological merging. (#900)

This is an automated email from the ASF dual-hosted git repository.

jpountz pushed a commit to branch branch_9x
in repository https://gitbox.apache.org/repos/asf/lucene.git


The following commit(s) were added to refs/heads/branch_9x by this push:
     new 62b1e2a1e91 LUCENE-10574: Prevent pathological merging. (#900)
62b1e2a1e91 is described below

commit 62b1e2a1e9100ffa6f0fa60f899f16a565588bd8
Author: Adrien Grand <jp...@gmail.com>
AuthorDate: Wed May 18 23:05:54 2022 +0200

    LUCENE-10574: Prevent pathological merging. (#900)
    
    This updates TieredMergePolicy and Log(Doc|Size)MergePolicy to only ever
    consider merges where the resulting segment would be at least 50% bigger than
    the biggest input segment. While a merge that only grows the biggest segment by
    50% is still quite inefficient, this constraint is good enough to prevent
    pathological O(N^2) merging.
---
 lucene/CHANGES.txt                                 |   3 +-
 .../org/apache/lucene/index/LogMergePolicy.java    |  59 +++++----
 .../org/apache/lucene/index/TieredMergePolicy.java |  19 ++-
 .../lucene/index/TestIndexWriterMergePolicy.java   | 141 ++++++++++++++-------
 .../tests/index/BaseMergePolicyTestCase.java       |  50 ++++++++
 .../apache/lucene/tests/util/LuceneTestCase.java   |  65 ----------
 6 files changed, 198 insertions(+), 139 deletions(-)

diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 8fdd95d85c7..846e09014af 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -23,7 +23,8 @@ Optimizations
 
 Bug Fixes
 ---------------------
-(No changes)
+
+* LUCENE-10574: Prevent pathological O(N^2) merging. (Adrien Grand)
 
 Other
 ---------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
index 4c3940857e3..51a5d5299ef 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
@@ -439,11 +439,13 @@ public abstract class LogMergePolicy extends MergePolicy {
   }
 
   private static class SegmentInfoAndLevel implements Comparable<SegmentInfoAndLevel> {
-    SegmentCommitInfo info;
-    float level;
+    final SegmentCommitInfo info;
+    final long size;
+    final float level;
 
-    public SegmentInfoAndLevel(SegmentCommitInfo info, float level) {
+    public SegmentInfoAndLevel(SegmentCommitInfo info, long size, float level) {
       this.info = info;
+      this.size = size;
       this.level = level;
     }
 
@@ -486,7 +488,7 @@ public abstract class LogMergePolicy extends MergePolicy {
       }
 
       final SegmentInfoAndLevel infoLevel =
-          new SegmentInfoAndLevel(info, (float) Math.log(size) / norm);
+          new SegmentInfoAndLevel(info, size, (float) Math.log(size) / norm);
       levels.add(infoLevel);
 
       if (verbose(mergeContext)) {
@@ -568,8 +570,13 @@ public abstract class LogMergePolicy extends MergePolicy {
       while (end <= 1 + upto) {
         boolean anyTooLarge = false;
         boolean anyMerging = false;
+        long mergeSize = 0;
+        long maxSegmentSize = 0;
         for (int i = start; i < end; i++) {
-          final SegmentCommitInfo info = levels.get(i).info;
+          final SegmentInfoAndLevel segLevel = levels.get(i);
+          mergeSize += segLevel.size;
+          maxSegmentSize = Math.max(maxSegmentSize, segLevel.size);
+          final SegmentCommitInfo info = segLevel.info;
           anyTooLarge |=
               (size(info, mergeContext) >= maxMergeSize
                   || sizeDocs(info, mergeContext) >= maxMergeDocs);
@@ -582,23 +589,31 @@ public abstract class LogMergePolicy extends MergePolicy {
         if (anyMerging) {
           // skip
         } else if (!anyTooLarge) {
-          if (spec == null) spec = new MergeSpecification();
-          final List<SegmentCommitInfo> mergeInfos = new ArrayList<>(end - start);
-          for (int i = start; i < end; i++) {
-            mergeInfos.add(levels.get(i).info);
-            assert infos.contains(levels.get(i).info);
-          }
-          if (verbose(mergeContext)) {
-            message(
-                "  add merge="
-                    + segString(mergeContext, mergeInfos)
-                    + " start="
-                    + start
-                    + " end="
-                    + end,
-                mergeContext);
-          }
-          spec.add(new OneMerge(mergeInfos));
+          if (mergeSize >= maxSegmentSize * 1.5) {
+            // Ignore any merge where the resulting segment is not at least 50% larger than the
+            // biggest input segment.
+            // Otherwise we could run into pathological O(N^2) merging where merges keep rewriting
+            // again and again the biggest input segment into a segment that is barely bigger.
+            if (spec == null) {
+              spec = new MergeSpecification();
+            }
+            final List<SegmentCommitInfo> mergeInfos = new ArrayList<>(end - start);
+            for (int i = start; i < end; i++) {
+              mergeInfos.add(levels.get(i).info);
+              assert infos.contains(levels.get(i).info);
+            }
+            if (verbose(mergeContext)) {
+              message(
+                  "  add merge="
+                      + segString(mergeContext, mergeInfos)
+                      + " start="
+                      + start
+                      + " end="
+                      + end,
+                  mergeContext);
+            }
+            spec.add(new OneMerge(mergeInfos));
+          } // else skip
         } else if (verbose(mergeContext)) {
           message(
               "    "
diff --git a/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java
index a71a0631ff3..d974148b4e4 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java
@@ -237,6 +237,7 @@ public class TieredMergePolicy extends MergePolicy {
 
   private static class SegmentSizeAndDocs {
     private final SegmentCommitInfo segInfo;
+    /// Size of the segment in bytes, pro-rated by the number of live documents.
     private final long sizeInBytes;
     private final int delCount;
     private final int maxDoc;
@@ -532,13 +533,21 @@ public class TieredMergePolicy extends MergePolicy {
         // segments, and already pre-excluded the too-large segments:
         assert candidate.size() > 0;
 
+        SegmentSizeAndDocs maxCandidateSegmentSize = segInfosSizes.get(candidate.get(0));
+        if (hitTooLarge == false
+            && mergeType == MERGE_TYPE.NATURAL
+            && bytesThisMerge < maxCandidateSegmentSize.sizeInBytes * 1.5) {
+          // Ignore any merge where the resulting segment is not at least 50% larger than the
+          // biggest input segment.
+          // Otherwise we could run into pathological O(N^2) merging where merges keep rewriting
+          // again and again the biggest input segment into a segment that is barely bigger.
+          continue;
+        }
+
         // A singleton merge with no deletes makes no sense. We can get here when forceMerge is
         // looping around...
-        if (candidate.size() == 1) {
-          SegmentSizeAndDocs segSizeDocs = segInfosSizes.get(candidate.get(0));
-          if (segSizeDocs.delCount == 0) {
-            continue;
-          }
+        if (candidate.size() == 1 && maxCandidateSegmentSize.delCount == 0) {
+          continue;
         }
 
         // If we didn't find a too-large merge and have a list of candidates
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
index 1176a993f2f..7c489701c1a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
@@ -19,6 +19,7 @@ package org.apache.lucene.index;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -42,6 +43,74 @@ import org.apache.lucene.tests.util.LuceneTestCase;
 @LuceneTestCase.SuppressCodecs("SimpleText")
 public class TestIndexWriterMergePolicy extends LuceneTestCase {
 
+  /**
+   * A less sophisticated version of LogDocMergePolicy, only for testing the interaction between
+   * IndexWriter and the MergePolicy.
+   */
+  private static class MockMergePolicy extends MergePolicy {
+
+    private int mergeFactor = 10;
+
+    public int getMergeFactor() {
+      return mergeFactor;
+    }
+
+    public void setMergeFactor(int mergeFactor) {
+      this.mergeFactor = mergeFactor;
+    }
+
+    @Override
+    public MergeSpecification findMerges(
+        MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext)
+        throws IOException {
+      List<SegmentCommitInfo> segments = new ArrayList<>();
+      for (SegmentCommitInfo sci : segmentInfos) {
+        segments.add(sci);
+      }
+      MergeSpecification spec = null;
+      for (int start = 0; start < segments.size(); ) {
+        final int end = start + mergeFactor;
+        if (end > segments.size()) {
+          break;
+        }
+        int minDocCount = Integer.MAX_VALUE;
+        int maxDocCount = 0;
+        for (int i = start; i < end; ++i) {
+          int docCount = segments.get(i).info.maxDoc();
+          minDocCount = Math.min(docCount, minDocCount);
+          maxDocCount = Math.max(docCount, maxDocCount);
+        }
+        if (maxDocCount < (long) mergeFactor * minDocCount) {
+          // Segment sizes differ by less than mergeFactor, they can be merged together
+          if (spec == null) {
+            spec = new MergeSpecification();
+          }
+          spec.add(new OneMerge(segments.subList(start, end)));
+          start = end;
+        } else {
+          start++;
+        }
+      }
+      return spec;
+    }
+
+    @Override
+    public MergeSpecification findForcedMerges(
+        SegmentInfos segmentInfos,
+        int maxSegmentCount,
+        Map<SegmentCommitInfo, Boolean> segmentsToMerge,
+        MergeContext mergeContext)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public MergeSpecification findForcedDeletesMerges(
+        SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException {
+      return null;
+    }
+  }
+
   // Test the normal case
   public void testNormalCase() throws IOException {
     Directory dir = newDirectory();
@@ -51,7 +120,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
             dir,
             newIndexWriterConfig(new MockAnalyzer(random()))
                 .setMaxBufferedDocs(10)
-                .setMergePolicy(new LogDocMergePolicy()));
+                .setMergePolicy(new MockMergePolicy()));
 
     for (int i = 0; i < 100; i++) {
       addDoc(writer);
@@ -71,7 +140,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
             dir,
             newIndexWriterConfig(new MockAnalyzer(random()))
                 .setMaxBufferedDocs(10)
-                .setMergePolicy(new LogDocMergePolicy()));
+                .setMergePolicy(new MockMergePolicy()));
 
     boolean noOverMerge = false;
     for (int i = 0; i < 100; i++) {
@@ -91,8 +160,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testForceFlush() throws IOException {
     Directory dir = newDirectory();
 
-    LogDocMergePolicy mp = new LogDocMergePolicy();
-    mp.setMinMergeDocs(100);
+    MockMergePolicy mp = new MockMergePolicy();
     mp.setMergeFactor(10);
     IndexWriter writer =
         new IndexWriter(
@@ -103,19 +171,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
 
     for (int i = 0; i < 100; i++) {
       addDoc(writer);
-      writer.close();
-
-      mp = new LogDocMergePolicy();
-      mp.setMergeFactor(10);
-      writer =
-          new IndexWriter(
-              dir,
-              newIndexWriterConfig(new MockAnalyzer(random()))
-                  .setOpenMode(OpenMode.APPEND)
-                  .setMaxBufferedDocs(10)
-                  .setMergePolicy(mp));
-      mp.setMinMergeDocs(100);
-      checkInvariants(writer);
+      writer.flush();
     }
 
     writer.close();
@@ -131,7 +187,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
             dir,
             newIndexWriterConfig(new MockAnalyzer(random()))
                 .setMaxBufferedDocs(10)
-                .setMergePolicy(newLogMergePolicy())
+                .setMergePolicy(new MockMergePolicy())
                 .setMergeScheduler(new SerialMergeScheduler()));
 
     for (int i = 0; i < 250; i++) {
@@ -139,7 +195,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
       checkInvariants(writer);
     }
 
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
+    ((MockMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
 
     // merge policy only fixes segments on levels where merges
     // have been triggered, so check invariants after all adds
@@ -162,7 +218,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
             dir,
             newIndexWriterConfig(new MockAnalyzer(random()))
                 .setMaxBufferedDocs(101)
-                .setMergePolicy(new LogDocMergePolicy())
+                .setMergePolicy(new MockMergePolicy())
                 .setMergeScheduler(new SerialMergeScheduler()));
 
     // leftmost* segment has 1 doc
@@ -180,12 +236,12 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
               newIndexWriterConfig(new MockAnalyzer(random()))
                   .setOpenMode(OpenMode.APPEND)
                   .setMaxBufferedDocs(101)
-                  .setMergePolicy(new LogDocMergePolicy())
+                  .setMergePolicy(new MockMergePolicy())
                   .setMergeScheduler(new SerialMergeScheduler()));
     }
 
     writer.close();
-    LogDocMergePolicy ldmp = new LogDocMergePolicy();
+    MockMergePolicy ldmp = new MockMergePolicy();
     ldmp.setMergeFactor(10);
     writer =
         new IndexWriter(
@@ -219,7 +275,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testMergeDocCount0() throws IOException {
     Directory dir = newDirectory();
 
-    LogDocMergePolicy ldmp = new LogDocMergePolicy();
+    MockMergePolicy ldmp = new MockMergePolicy();
     ldmp.setMergeFactor(100);
     IndexWriter writer =
         new IndexWriter(
@@ -243,7 +299,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     writer.deleteDocuments(new Term("content", "aaa"));
     writer.close();
 
-    ldmp = new LogDocMergePolicy();
+    ldmp = new MockMergePolicy();
     ldmp.setMergeFactor(5);
     writer =
         new IndexWriter(
@@ -277,8 +333,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   private void checkInvariants(IndexWriter writer) throws IOException {
     writer.waitForMerges();
     int maxBufferedDocs = writer.getConfig().getMaxBufferedDocs();
-    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
-    int maxMergeDocs = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMaxMergeDocs();
+    int mergeFactor = ((MockMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
 
     int ramSegmentCount = writer.getNumBufferedDocuments();
     assertTrue(ramSegmentCount < maxBufferedDocs);
@@ -310,22 +365,18 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
       if (docCount <= upperBound) {
         numSegments++;
       } else {
-        if (upperBound * mergeFactor <= maxMergeDocs) {
-          assertTrue(
-              "maxMergeDocs="
-                  + maxMergeDocs
-                  + "; numSegments="
-                  + numSegments
-                  + "; upperBound="
-                  + upperBound
-                  + "; mergeFactor="
-                  + mergeFactor
-                  + "; segs="
-                  + writer.segString()
-                  + " config="
-                  + writer.getConfig(),
-              numSegments < mergeFactor);
-        }
+        assertTrue(
+            "numSegments="
+                + numSegments
+                + "; upperBound="
+                + upperBound
+                + "; mergeFactor="
+                + mergeFactor
+                + "; segs="
+                + writer.segString()
+                + " config="
+                + writer.getConfig(),
+            numSegments < mergeFactor);
 
         do {
           lowerBound = upperBound;
@@ -334,16 +385,14 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
         numSegments = 1;
       }
     }
-    if (upperBound * mergeFactor <= maxMergeDocs) {
-      assertTrue(numSegments < mergeFactor);
-    }
+    assertTrue(numSegments < mergeFactor);
   }
 
   private static final double EPSILON = 1E-14;
 
   public void testSetters() {
     assertSetters(new LogByteSizeMergePolicy());
-    assertSetters(new LogDocMergePolicy());
+    assertSetters(new MockMergePolicy());
   }
 
   // Test basic semantics of merge on commit
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseMergePolicyTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseMergePolicyTestCase.java
index 75d88c74a4b..490c144e24a 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseMergePolicyTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/BaseMergePolicyTestCase.java
@@ -512,4 +512,54 @@ public abstract class BaseMergePolicyTestCase extends LuceneTestCase {
     /** Bytes written through merges. */
     long mergeBytesWritten;
   }
+
+  public void testNoPathologicalMerges() throws IOException {
+    MergePolicy mergePolicy = mergePolicy();
+    IOStats stats = new IOStats();
+    AtomicLong segNameGenerator = new AtomicLong();
+    MergeContext mergeContext = new MockMergeContext(SegmentCommitInfo::getDelCount);
+    SegmentInfos segmentInfos = new SegmentInfos(Version.LATEST.major);
+    // Both the docs per flush and doc size are small because these are the typical cases that used
+    // to trigger pathological O(n^2) merging due to floor segment sizes
+    final double avgDocSizeMB = 10. / 1024 / 1024;
+    final int maxDocsPerFlush = 3;
+    final int totalDocs = 10_000;
+    int numFlushes = 0;
+    for (int numDocs = 0; numDocs < totalDocs; ) {
+      int flushDocCount = TestUtil.nextInt(random(), 1, maxDocsPerFlush);
+      numDocs += flushDocCount;
+      double flushSizeMB = flushDocCount * avgDocSizeMB;
+      stats.flushBytesWritten += flushSizeMB * 1024 * 1024;
+      segmentInfos.add(
+          makeSegmentCommitInfo(
+              "_" + segNameGenerator.getAndIncrement(),
+              flushDocCount,
+              0,
+              flushSizeMB,
+              IndexWriter.SOURCE_FLUSH));
+      ++numFlushes;
+
+      MergeSpecification merges =
+          mergePolicy.findMerges(MergeTrigger.SEGMENT_FLUSH, segmentInfos, mergeContext);
+      while (merges != null) {
+        assertTrue(merges.merges.size() > 0);
+        assertMerge(mergePolicy, merges);
+        for (OneMerge oneMerge : merges.merges) {
+          segmentInfos =
+              applyMerge(segmentInfos, oneMerge, "_" + segNameGenerator.getAndIncrement(), stats);
+        }
+        merges = mergePolicy.findMerges(MergeTrigger.MERGE_FINISHED, segmentInfos, mergeContext);
+      }
+      assertSegmentInfos(mergePolicy, segmentInfos);
+    }
+
+    final double writeAmplification =
+        (double) (stats.flushBytesWritten + stats.mergeBytesWritten) / stats.flushBytesWritten;
+    // Assuming a merge factor of 2, which is the value that triggers the most write amplification,
+    // the total write amplification would be ~ log(numFlushes)/log(2). We allow merge policies to
+    // have a write amplification up to log(numFlushes)/log(1.5). Greater values would indicate a
+    // problem with the merge policy.
+    final double maxAllowedWriteAmplification = Math.log(numFlushes) / Math.log(1.5);
+    assertTrue(writeAmplification < maxAllowedWriteAmplification);
+  }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
index 7160f7554e0..5b03b372f57 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
@@ -1015,8 +1015,6 @@ public abstract class LuceneTestCase extends Assert {
 
     c.setMergePolicy(newMergePolicy(r));
 
-    avoidPathologicalMerging(c);
-
     if (rarely(r)) {
       c.setMergedSegmentWarmer(new SimpleMergedSegmentWarmer(c.getInfoStream()));
     }
@@ -1034,69 +1032,6 @@ public abstract class LuceneTestCase extends Assert {
     return c;
   }
 
-  private static void avoidPathologicalMerging(IndexWriterConfig iwc) {
-    // Don't allow "tiny" flushed segments with "big" merge
-    // floor: this leads to pathological O(N^2) merge costs:
-    long estFlushSizeBytes = Long.MAX_VALUE;
-    if (iwc.getMaxBufferedDocs() != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
-      // Gross estimation of 1 KB segment bytes for each doc indexed:
-      estFlushSizeBytes = Math.min(estFlushSizeBytes, iwc.getMaxBufferedDocs() * 1024);
-    }
-    if (iwc.getRAMBufferSizeMB() != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
-      estFlushSizeBytes =
-          Math.min(estFlushSizeBytes, (long) (iwc.getRAMBufferSizeMB() * 1024 * 1024));
-    }
-    assert estFlushSizeBytes > 0;
-
-    MergePolicy mp = iwc.getMergePolicy();
-    if (mp instanceof TieredMergePolicy) {
-      TieredMergePolicy tmp = (TieredMergePolicy) mp;
-      long floorSegBytes = (long) (tmp.getFloorSegmentMB() * 1024 * 1024);
-      if (floorSegBytes / estFlushSizeBytes > 10) {
-        double newValue = estFlushSizeBytes * 10.0 / 1024 / 1024;
-        if (VERBOSE) {
-          System.out.println(
-              "NOTE: LuceneTestCase: changing TieredMergePolicy.floorSegmentMB from "
-                  + tmp.getFloorSegmentMB()
-                  + " to "
-                  + newValue
-                  + " to avoid pathological merging");
-        }
-        tmp.setFloorSegmentMB(newValue);
-      }
-    } else if (mp instanceof LogByteSizeMergePolicy) {
-      LogByteSizeMergePolicy lmp = (LogByteSizeMergePolicy) mp;
-      if ((lmp.getMinMergeMB() * 1024 * 1024) / estFlushSizeBytes > 10) {
-        double newValue = estFlushSizeBytes * 10.0 / 1024 / 1024;
-        if (VERBOSE) {
-          System.out.println(
-              "NOTE: LuceneTestCase: changing LogByteSizeMergePolicy.minMergeMB from "
-                  + lmp.getMinMergeMB()
-                  + " to "
-                  + newValue
-                  + " to avoid pathological merging");
-        }
-        lmp.setMinMergeMB(newValue);
-      }
-    } else if (mp instanceof LogDocMergePolicy) {
-      LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
-      assert estFlushSizeBytes / 1024 < Integer.MAX_VALUE / 10;
-      int estFlushDocs = Math.max(1, (int) (estFlushSizeBytes / 1024));
-      if (lmp.getMinMergeDocs() / estFlushDocs > 10) {
-        int newValue = estFlushDocs * 10;
-        if (VERBOSE) {
-          System.out.println(
-              "NOTE: LuceneTestCase: changing LogDocMergePolicy.minMergeDocs from "
-                  + lmp.getMinMergeDocs()
-                  + " to "
-                  + newValue
-                  + " to avoid pathological merging");
-        }
-        lmp.setMinMergeDocs(newValue);
-      }
-    }
-  }
-
   public static MergePolicy newMergePolicy(Random r) {
     return newMergePolicy(r, true);
   }