Posted to commits@hbase.apache.org by nd...@apache.org on 2020/10/30 17:43:59 UTC

[hbase] branch branch-2 updated: HBASE-24419 Normalizer merge plans should consider more than 2 regions when possible

This is an automated email from the ASF dual-hosted git repository.

ndimiduk pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new b84e2f5  HBASE-24419 Normalizer merge plans should consider more than 2 regions when possible
b84e2f5 is described below

commit b84e2f598bf2485bb80fb25c57f92837f6151ab1
Author: Nick Dimiduk <nd...@apache.org>
AuthorDate: Wed Sep 30 16:48:01 2020 -0700

    HBASE-24419 Normalizer merge plans should consider more than 2 regions when possible
    
    The core change here is to the loop in
    `SimpleRegionNormalizer#computeMergeNormalizationPlans`. It's a nested
    loop that walks the table's region chain once, looking for contiguous
    sequences of regions that meet the criteria for merge. The outer loop
    tracks the starting point of the next sequence, while the inner loop
    looks for the end of that sequence. A single sequence becomes an
    instance of `MergeNormalizationPlan`.
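    
    For illustration only (not part of this commit): a simplified,
    standalone sketch of the same loop, with HBase types replaced by plain
    arrays and the skip-this-region checks omitted. The class name
    MergeRangeSketch is invented for the example; the sample sizes in
    main() match testSplitAndMultiMerge in the diff below (average region
    size 48 / 8 = 6 MB).
    
      // MergeRangeSketch.java -- simplified illustration; not the committed code.
      import java.util.ArrayList;
      import java.util.List;
      
      public final class MergeRangeSketch {
      
        /** Group contiguous regions into candidate merge ranges of two or more members. */
        static List<List<Integer>> planMergeRanges(long[] regionSizesMb, long avgRegionSizeMb) {
          List<List<Integer>> ranges = new ArrayList<>();
          int current = 0;
          // outer loop: tracks where the next candidate range starts
          for (int rangeStart = 0;
               rangeStart < regionSizesMb.length - 1 && current < regionSizesMb.length;) {
            List<Integer> members = new ArrayList<>();
            long sumMb = 0;
            // inner loop: extend the range while it still fits under the average size
            for (current = rangeStart; current < regionSizesMb.length; current++) {
              long sizeMb = regionSizesMb[current];
              // (the committed code also breaks out here for regions that must be skipped)
              if (members.isEmpty()        // seed the range with its first region
                  || sizeMb == 0           // always absorb empty regions
                  || sumMb + sizeMb <= avgRegionSizeMb) { // add while capacity remains
                members.add(current);
                sumMb += sizeMb;
                continue;
              }
              break;                       // range is full; the next range starts here
            }
            rangeStart = Math.max(current, rangeStart + 1);
            if (members.size() > 1) {      // a lone region is not a merge candidate
              ranges.add(members);
            }
          }
          return ranges;
        }
      
        public static void main(String[] args) {
          long[] sizesMb = { 3, 1, 1, 30, 9, 3, 1, 0 };    // average is 6 MB
          System.out.println(planMergeRanges(sizesMb, 6)); // [[0, 1, 2], [5, 6, 7]]
        }
      }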
    
    Signed-off-by: Huaxiang Sun <hu...@apache.org>
---
 .../org/apache/hadoop/hbase/MatcherPredicate.java  |  65 ++++++++
 .../master/normalizer/MergeNormalizationPlan.java  |   6 +
 .../master/normalizer/NormalizationTarget.java     |   3 +-
 .../master/normalizer/SimpleRegionNormalizer.java  |  82 ++++++----
 .../normalizer/TestSimpleRegionNormalizer.java     |  64 +++++++-
 .../TestSimpleRegionNormalizerOnCluster.java       | 167 ++++++++++++---------
 6 files changed, 287 insertions(+), 100 deletions(-)

diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java
new file mode 100644
index 0000000..695c026
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/MatcherPredicate.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.function.Supplier;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.StringDescription;
+
+/**
+ * An implementation of {@link Waiter.ExplainingPredicate} that uses Hamcrest {@link Matcher} for
+ * both predicate evaluation and explanation.
+ *
+ * @param <T> The type of value to be evaluated via {@link Matcher}.
+ */
+@InterfaceAudience.Private
+public class MatcherPredicate<T> implements Waiter.ExplainingPredicate<RuntimeException> {
+
+  private final String reason;
+  private final Supplier<T> supplier;
+  private final Matcher<? super T> matcher;
+  private T currentValue;
+
+  public MatcherPredicate(final Supplier<T> supplier, final Matcher<? super T> matcher) {
+    this("", supplier, matcher);
+  }
+
+  public MatcherPredicate(final String reason, final Supplier<T> supplier,
+    final Matcher<? super T> matcher) {
+    this.reason = reason;
+    this.supplier = supplier;
+    this.matcher = matcher;
+    this.currentValue = null;
+  }
+
+  @Override public boolean evaluate() {
+    currentValue = supplier.get();
+    return matcher.matches(currentValue);
+  }
+
+  @Override public String explainFailure() {
+    final Description description = new StringDescription()
+      .appendText(reason)
+      .appendText("\nExpected: ").appendDescriptionOf(matcher)
+      .appendText("\n     but: ");
+    matcher.describeMismatch(currentValue, description);
+    return description.toString();
+  }
+}
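
Not part of the patch: a minimal standalone sketch of how the new
MatcherPredicate above behaves. The class name, reason string, and values
are invented for the example; within the patch the predicate is handed to
HBaseTestingUtility#waitFor, as the test changes further down show.

    // MatcherPredicateSketch.java -- illustrative only; needs the hbase-common
    // test classes and Hamcrest on the classpath.
    import static org.hamcrest.Matchers.greaterThanOrEqualTo;
    
    import org.apache.hadoop.hbase.MatcherPredicate;
    
    public final class MatcherPredicateSketch {
      public static void main(String[] args) {
        MatcherPredicate<Integer> predicate = new MatcherPredicate<Integer>(
          "region count for table test_table",  // reason, prepended to the explanation
          () -> 3,                              // supplier polled on every evaluate()
          greaterThanOrEqualTo(5));             // Hamcrest matcher deciding success
    
        // evaluate() records the supplier's latest value and asks the matcher about it.
        System.out.println(predicate.evaluate());   // false here, since 3 < 5
        // explainFailure() renders the matcher's expectation plus the mismatch for
        // that last value, prefixed with the reason.
        System.out.println(predicate.explainFailure());
        // In the tests below it is passed to TEST_UTIL.waitFor(timeout, predicate),
        // which polls evaluate() until it passes or the wait times out;
        // explainFailure() supplies the diagnostic text.
      }
    }
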
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java
index 677b9ec..f5a7286 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/MergeNormalizationPlan.java
@@ -96,6 +96,12 @@ final class MergeNormalizationPlan implements NormalizationPlan {
 
     private final List<NormalizationTarget> normalizationTargets = new LinkedList<>();
 
+    public Builder setTargets(final List<NormalizationTarget> targets) {
+      normalizationTargets.clear();
+      normalizationTargets.addAll(targets);
+      return this;
+    }
+
     public Builder addTarget(final RegionInfo regionInfo, final long regionSizeMb) {
       normalizationTargets.add(new NormalizationTarget(regionInfo, regionSizeMb));
       return this;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java
index 9e4b3f4..9549028 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationTarget.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.normalizer;
 
+import java.util.Objects;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.commons.lang3.builder.ToStringBuilder;
@@ -33,7 +34,7 @@ class NormalizationTarget {
   private final long regionSizeMb;
 
   NormalizationTarget(final RegionInfo regionInfo, final long regionSizeMb) {
-    this.regionInfo = regionInfo;
+    this.regionInfo = Objects.requireNonNull(regionInfo);
     this.regionSizeMb = regionSizeMb;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
index a641a0a..062e401 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hbase.master.normalizer;
 
+import static org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils.isEmpty;
 import java.io.IOException;
 import java.time.Instant;
 import java.time.Period;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Objects;
 import java.util.function.BooleanSupplier;
@@ -41,7 +43,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
 
 /**
  * Simple implementation of region normalizer. Logic in use:
@@ -77,7 +78,7 @@ class SimpleRegionNormalizer implements RegionNormalizer {
   private boolean mergeEnabled;
   private int minRegionCount;
   private Period mergeMinRegionAge;
-  private int mergeMinRegionSizeMb;
+  private long mergeMinRegionSizeMb;
 
   public SimpleRegionNormalizer() {
     splitEnabled = DEFAULT_SPLIT_ENABLED;
@@ -124,10 +125,10 @@ class SimpleRegionNormalizer implements RegionNormalizer {
     return Period.ofDays(settledValue);
   }
 
-  private static int parseMergeMinRegionSizeMb(final Configuration conf) {
-    final int parsedValue =
-      conf.getInt(MERGE_MIN_REGION_SIZE_MB_KEY, DEFAULT_MERGE_MIN_REGION_SIZE_MB);
-    final int settledValue = Math.max(0, parsedValue);
+  private static long parseMergeMinRegionSizeMb(final Configuration conf) {
+    final long parsedValue =
+      conf.getLong(MERGE_MIN_REGION_SIZE_MB_KEY, DEFAULT_MERGE_MIN_REGION_SIZE_MB);
+    final long settledValue = Math.max(0, parsedValue);
     if (parsedValue != settledValue) {
       warnInvalidValue(MERGE_MIN_REGION_SIZE_MB_KEY, parsedValue, settledValue);
     }
@@ -171,7 +172,7 @@ class SimpleRegionNormalizer implements RegionNormalizer {
   /**
    * Return this instance's configured value for {@value #MERGE_MIN_REGION_SIZE_MB_KEY}.
    */
-  public int getMergeMinRegionSizeMb() {
+  public long getMergeMinRegionSizeMb() {
     return mergeMinRegionSizeMb;
   }
 
@@ -198,7 +199,7 @@ class SimpleRegionNormalizer implements RegionNormalizer {
     }
 
     final NormalizeContext ctx = new NormalizeContext(table);
-    if (CollectionUtils.isEmpty(ctx.getTableRegions())) {
+    if (isEmpty(ctx.getTableRegions())) {
       return Collections.emptyList();
     }
 
@@ -251,7 +252,7 @@ class SimpleRegionNormalizer implements RegionNormalizer {
    * Also make sure tableRegions contains regions of the same table
    */
   private double getAverageRegionSizeMb(final List<RegionInfo> tableRegions) {
-    if (CollectionUtils.isEmpty(tableRegions)) {
+    if (isEmpty(tableRegions)) {
       throw new IllegalStateException(
         "Cannot calculate average size of a table without any regions.");
     }
@@ -315,35 +316,60 @@ class SimpleRegionNormalizer implements RegionNormalizer {
    * towards target average or target region count.
    */
   private List<NormalizationPlan> computeMergeNormalizationPlans(final NormalizeContext ctx) {
-    if (ctx.getTableRegions().size() < minRegionCount) {
+    if (isEmpty(ctx.getTableRegions()) || ctx.getTableRegions().size() < minRegionCount) {
       LOG.debug("Table {} has {} regions, required min number of regions for normalizer to run"
         + " is {}, not computing merge plans.", ctx.getTableName(), ctx.getTableRegions().size(),
         minRegionCount);
       return Collections.emptyList();
     }
 
-    final double avgRegionSizeMb = ctx.getAverageRegionSizeMb();
+    final long avgRegionSizeMb = (long) ctx.getAverageRegionSizeMb();
+    if (avgRegionSizeMb < mergeMinRegionSizeMb) {
+      return Collections.emptyList();
+    }
     LOG.debug("Computing normalization plan for table {}. average region size: {}, number of"
       + " regions: {}.", ctx.getTableName(), avgRegionSizeMb, ctx.getTableRegions().size());
 
-    final List<NormalizationPlan> plans = new ArrayList<>();
-    for (int candidateIdx = 0; candidateIdx < ctx.getTableRegions().size() - 1; candidateIdx++) {
-      final RegionInfo current = ctx.getTableRegions().get(candidateIdx);
-      final RegionInfo next = ctx.getTableRegions().get(candidateIdx + 1);
-      if (skipForMerge(ctx.getRegionStates(), current)
-        || skipForMerge(ctx.getRegionStates(), next)) {
-        continue;
+    // this nested loop walks the table's region chain once, looking for contiguous sequences of
+    // regions that meet the criteria for merge. The outer loop tracks the starting point of the
+    // next sequence, the inner loop looks for the end of that sequence. A single sequence becomes
+    // an instance of MergeNormalizationPlan.
+
+    final List<NormalizationPlan> plans = new LinkedList<>();
+    final List<NormalizationTarget> rangeMembers = new LinkedList<>();
+    long sumRangeMembersSizeMb;
+    int current = 0;
+    for (int rangeStart = 0;
+         rangeStart < ctx.getTableRegions().size() - 1 && current < ctx.getTableRegions().size();) {
+      // walk the region chain looking for contiguous sequences of regions that can be merged.
+      rangeMembers.clear();
+      sumRangeMembersSizeMb = 0;
+      for (current = rangeStart; current < ctx.getTableRegions().size(); current++) {
+        final RegionInfo regionInfo = ctx.getTableRegions().get(current);
+        final long regionSizeMb = getRegionSizeMB(regionInfo);
+        if (skipForMerge(ctx.getRegionStates(), regionInfo)) {
+          // this region cannot participate in a range. resume the outer loop.
+          rangeStart = Math.max(current, rangeStart + 1);
+          break;
+        }
+        if (rangeMembers.isEmpty() // when there are no range members, seed the range with whatever
+                                   // we have. this way we're prepared in case the next region is
+                                   // 0-size.
+          || regionSizeMb == 0 // always add an empty region to the current range.
+          || (regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb)) { // add the current region
+                                                                          // to the range when
+                                                                          // there's capacity
+                                                                          // remaining.
+          rangeMembers.add(new NormalizationTarget(regionInfo, regionSizeMb));
+          sumRangeMembersSizeMb += regionSizeMb;
+          continue;
+        }
+        // we have accumulated enough regions to fill a range. resume the outer loop.
+        rangeStart = Math.max(current, rangeStart + 1);
+        break;
       }
-      final long currentSizeMb = getRegionSizeMB(current);
-      final long nextSizeMb = getRegionSizeMB(next);
-      // always merge away empty regions when they present themselves.
-      if (currentSizeMb == 0 || nextSizeMb == 0 || currentSizeMb + nextSizeMb < avgRegionSizeMb) {
-        final MergeNormalizationPlan plan = new MergeNormalizationPlan.Builder()
-          .addTarget(current, currentSizeMb)
-          .addTarget(next, nextSizeMb)
-          .build();
-        plans.add(plan);
-        candidateIdx++;
+      if (rangeMembers.size() > 1) {
+        plans.add(new MergeNormalizationPlan.Builder().setTargets(rangeMembers).build());
       }
     }
     return plans;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
index f263cbc..33b3297 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
@@ -33,6 +33,7 @@ import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.not;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
 import static org.mockito.Mockito.when;
@@ -225,7 +226,7 @@ public class TestSimpleRegionNormalizer {
   }
 
   @Test
-  public void testSplitWithTargetRegionSize() throws Exception {
+  public void testWithTargetRegionSize() throws Exception {
     final TableName tableName = name.getTableName();
     final List<RegionInfo> regionInfos = createRegionInfos(tableName, 6);
     final Map<byte[], Integer> regionSizes =
@@ -251,8 +252,6 @@ public class TestSimpleRegionNormalizer {
         new MergeNormalizationPlan.Builder()
           .addTarget(regionInfos.get(0), 20)
           .addTarget(regionInfos.get(1), 40)
-          .build(),
-        new MergeNormalizationPlan.Builder()
           .addTarget(regionInfos.get(2), 60)
           .addTarget(regionInfos.get(3), 80)
           .build()));
@@ -392,7 +391,7 @@ public class TestSimpleRegionNormalizer {
   }
 
   @Test
-  public void testMergeEmptyRegions() {
+  public void testMergeEmptyRegions0() {
     conf.setBoolean(SPLIT_ENABLED_KEY, false);
     conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0);
     final TableName tableName = name.getTableName();
@@ -418,6 +417,63 @@ public class TestSimpleRegionNormalizer {
         .build()));
   }
 
+  @Test
+  public void testMergeEmptyRegions1() {
+    conf.setBoolean(SPLIT_ENABLED_KEY, false);
+    conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0);
+    final TableName tableName = name.getTableName();
+    final List<RegionInfo> regionInfos = createRegionInfos(tableName, 8);
+    final Map<byte[], Integer> regionSizes =
+      createRegionSizesMap(regionInfos, 0, 1, 10, 0, 9, 0, 10, 0);
+    setupMocksForNormalizer(regionSizes, regionInfos);
+
+    assertFalse(normalizer.isSplitEnabled());
+    assertEquals(0, normalizer.getMergeMinRegionSizeMb());
+    assertThat(normalizer.computePlansForTable(tableName), contains(
+      new MergeNormalizationPlan.Builder()
+        .addTarget(regionInfos.get(0), 0)
+        .addTarget(regionInfos.get(1), 1)
+        .build(),
+      new MergeNormalizationPlan.Builder()
+        .addTarget(regionInfos.get(2), 10)
+        .addTarget(regionInfos.get(3), 0)
+        .build(),
+      new MergeNormalizationPlan.Builder()
+        .addTarget(regionInfos.get(4), 9)
+        .addTarget(regionInfos.get(5), 0)
+        .build(),
+      new MergeNormalizationPlan.Builder()
+        .addTarget(regionInfos.get(6), 10)
+        .addTarget(regionInfos.get(7), 0)
+        .build()));
+  }
+
+  @Test
+  public void testSplitAndMultiMerge() {
+    conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 0);
+    final TableName tableName = name.getTableName();
+    final List<RegionInfo> regionInfos = createRegionInfos(tableName, 8);
+    final Map<byte[], Integer> regionSizes =
+      createRegionSizesMap(regionInfos, 3, 1, 1, 30, 9, 3, 1, 0);
+    setupMocksForNormalizer(regionSizes, regionInfos);
+
+    assertTrue(normalizer.isMergeEnabled());
+    assertTrue(normalizer.isSplitEnabled());
+    assertEquals(0, normalizer.getMergeMinRegionSizeMb());
+    assertThat(normalizer.computePlansForTable(tableName), contains(
+      new SplitNormalizationPlan(regionInfos.get(3), 30),
+      new MergeNormalizationPlan.Builder()
+        .addTarget(regionInfos.get(0), 3)
+        .addTarget(regionInfos.get(1), 1)
+        .addTarget(regionInfos.get(2), 1)
+        .build(),
+      new MergeNormalizationPlan.Builder()
+        .addTarget(regionInfos.get(5), 3)
+        .addTarget(regionInfos.get(6), 1)
+        .addTarget(regionInfos.get(7), 0)
+        .build()));
+  }
+
   // This test is to make sure that normalizer is only going to merge adjacent regions.
   @Test
   public void testNormalizerCannotMergeNonAdjacentRegions() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
index 9a3864a..79882bc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hbase.master.normalizer;
 
+import static org.hamcrest.Matchers.comparesEqualTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.not;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
@@ -29,6 +34,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MatcherPredicate;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
@@ -55,6 +61,8 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.LoadTestKVGenerator;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -144,7 +152,7 @@ public class TestSimpleRegionNormalizerOnCluster {
 
       assertFalse(admin.normalizerSwitch(true));
       assertTrue(admin.normalize());
-      waitForTableSplit(tn1, tn1RegionCount + 1);
+      waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1));
 
       // confirm that tn1 has (tn1RegionCount + 1) number of regions.
       // tn2 has tn2RegionCount number of regions because normalizer has not been enabled on it.
@@ -161,7 +169,7 @@ public class TestSimpleRegionNormalizerOnCluster {
         tn2RegionCount,
         getRegionCount(tn2));
       LOG.debug("waiting for t3 to settle...");
-      waitForTableRegionCount(tn3, tn3RegionCount);
+      waitForTableRegionCount(tn3, comparesEqualTo(tn3RegionCount));
     } finally {
       dropIfExists(tn1);
       dropIfExists(tn2);
@@ -198,7 +206,7 @@ public class TestSimpleRegionNormalizerOnCluster {
           currentRegionCount,
           getRegionCount(tableName));
       } else {
-        waitForTableSplit(tableName, currentRegionCount + 1);
+        waitForTableRegionCount(tableName, greaterThanOrEqualTo(currentRegionCount + 1));
         assertEquals(
           tableName + " should have split.",
           currentRegionCount + 1,
@@ -216,7 +224,7 @@ public class TestSimpleRegionNormalizerOnCluster {
       final int currentRegionCount = createTableBegsMerge(tableName);
       assertFalse(admin.normalizerSwitch(true));
       assertTrue(admin.normalize());
-      waitForTableMerge(tableName, currentRegionCount - 1);
+      waitForTableRegionCount(tableName, lessThanOrEqualTo(currentRegionCount - 1));
       assertEquals(
         tableName + " should have merged.",
         currentRegionCount - 1,
@@ -242,7 +250,7 @@ public class TestSimpleRegionNormalizerOnCluster {
 
       assertFalse(admin.normalizerSwitch(true));
       assertTrue(admin.normalize(ntfp));
-      waitForTableSplit(tn1, tn1RegionCount + 1);
+      waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1));
 
       // confirm that tn1 has (tn1RegionCount + 1) number of regions.
       // tn2 has tn2RegionCount number of regions because it's not a member of the target namespace.
@@ -250,7 +258,7 @@ public class TestSimpleRegionNormalizerOnCluster {
         tn1 + " should have split.",
         tn1RegionCount + 1,
         getRegionCount(tn1));
-      waitForTableRegionCount(tn2, tn2RegionCount);
+      waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount));
     } finally {
       dropIfExists(tn1);
       dropIfExists(tn2);
@@ -271,7 +279,7 @@ public class TestSimpleRegionNormalizerOnCluster {
 
       assertFalse(admin.normalizerSwitch(true));
       assertTrue(admin.normalize(ntfp));
-      waitForTableSplit(tn1, tn1RegionCount + 1);
+      waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1));
 
       // confirm that tn1 has (tn1RegionCount + 1) number of regions.
       // tn2 has tn2RegionCount number of regions because it fails filter.
@@ -279,7 +287,7 @@ public class TestSimpleRegionNormalizerOnCluster {
         tn1 + " should have split.",
         tn1RegionCount + 1,
         getRegionCount(tn1));
-      waitForTableRegionCount(tn2, tn2RegionCount);
+      waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount));
     } finally {
       dropIfExists(tn1);
       dropIfExists(tn2);
@@ -300,7 +308,7 @@ public class TestSimpleRegionNormalizerOnCluster {
 
       assertFalse(admin.normalizerSwitch(true));
       assertTrue(admin.normalize(ntfp));
-      waitForTableSplit(tn1, tn1RegionCount + 1);
+      waitForTableRegionCount(tn1, greaterThanOrEqualTo(tn1RegionCount + 1));
 
       // confirm that tn1 has (tn1RegionCount + 1) number of regions.
       // tn2 has tn3RegionCount number of regions because it fails filter:
@@ -308,13 +316,33 @@ public class TestSimpleRegionNormalizerOnCluster {
         tn1 + " should have split.",
         tn1RegionCount + 1,
         getRegionCount(tn1));
-      waitForTableRegionCount(tn2, tn2RegionCount);
+      waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount));
     } finally {
       dropIfExists(tn1);
       dropIfExists(tn2);
     }
   }
 
+  /**
+   * A test for when a region is the target of both a split and a merge plan. Does not define
+   * expected behavior, only that some change is applied to the table.
+   */
+  @Test
+  public void testTargetOfSplitAndMerge() throws Exception {
+    final TableName tn = TableName.valueOf(name.getMethodName());
+    try {
+      final int tnRegionCount = createTableTargetOfSplitAndMerge(tn);
+      assertFalse(admin.normalizerSwitch(true));
+      assertTrue(admin.normalize());
+      TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>(
+        "expected " + tn + " to split or merge (probably split)",
+        () -> getRegionCountUnchecked(tn),
+        not(comparesEqualTo(tnRegionCount))));
+    } finally {
+      dropIfExists(tn);
+    }
+  }
+
   private static TableName buildTableNameForQuotaTest(final String methodName) throws Exception {
     String nsp = "np2";
     NamespaceDescriptor nspDesc =
@@ -326,74 +354,30 @@ public class TestSimpleRegionNormalizerOnCluster {
   }
 
   private static void waitForSkippedSplits(final HMaster master,
-    final long existingSkippedSplitCount) throws Exception {
-    TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate<Exception>() {
-      @Override public String explainFailure() {
-        return "waiting to observe split attempt and skipped.";
-      }
-      @Override public boolean evaluate() {
-        final long skippedSplitCount = master.getRegionNormalizerManager()
-          .getSkippedCount(PlanType.SPLIT);
-        return skippedSplitCount > existingSkippedSplitCount;
-      }
-    });
+    final long existingSkippedSplitCount) {
+    TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>(
+      "waiting to observe split attempt and skipped.",
+      () -> master.getRegionNormalizerManager().getSkippedCount(PlanType.SPLIT),
+      Matchers.greaterThan(existingSkippedSplitCount)));
   }
 
   private static void waitForTableRegionCount(final TableName tableName,
-      final int targetRegionCount) throws IOException {
-    TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate<IOException>() {
-      @Override
-      public String explainFailure() {
-        return "expected " + targetRegionCount + " number of regions for table " + tableName;
-      }
-
-      @Override
-      public boolean evaluate() throws IOException {
-        final int currentRegionCount = getRegionCount(tableName);
-        return currentRegionCount == targetRegionCount;
-      }
-    });
-  }
-
-  private static void waitForTableSplit(final TableName tableName, final int targetRegionCount)
-      throws IOException {
-    TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate<IOException>() {
-      @Override
-      public String explainFailure() {
-        return "expected normalizer to split region.";
-      }
-
-      @Override
-      public boolean evaluate() throws IOException {
-        final int currentRegionCount = getRegionCount(tableName);
-        return currentRegionCount >= targetRegionCount;
-      }
-    });
-  }
-
-  private static void waitForTableMerge(final TableName tableName, final int targetRegionCount)
-      throws IOException {
-    TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate<IOException>() {
-      @Override
-      public String explainFailure() {
-        return "expected normalizer to merge regions.";
-      }
-
-      @Override
-      public boolean evaluate() throws IOException {
-        final int currentRegionCount = getRegionCount(tableName);
-        return currentRegionCount <= targetRegionCount;
-      }
-    });
+    Matcher<? super Integer> matcher) {
+    TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>(
+      "region count for table " + tableName + " does not match expected",
+      () -> getRegionCountUnchecked(tableName),
+      matcher));
   }
 
   private static List<HRegion> generateTestData(final TableName tableName,
     final int... regionSizesMb) throws IOException {
     final List<HRegion> generatedRegions;
     final int numRegions = regionSizesMb.length;
+    LOG.debug("generating test data into {}, {} regions of sizes (mb) {}", tableName, numRegions,
+      regionSizesMb);
     try (Table ignored = TEST_UTIL.createMultiRegionTable(tableName, FAMILY_NAME, numRegions)) {
       // Need to get sorted list of regions here
-      generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
+      generatedRegions = new ArrayList<>(TEST_UTIL.getHBaseCluster().getRegions(tableName));
       generatedRegions.sort(Comparator.comparing(HRegion::getRegionInfo, RegionInfo.COMPARATOR));
       assertEquals(numRegions, generatedRegions.size());
       for (int i = 0; i < numRegions; i++) {
@@ -407,6 +391,7 @@ public class TestSimpleRegionNormalizerOnCluster {
 
   private static void generateTestData(Region region, int numRows) throws IOException {
     // generating 1Mb values
+    LOG.debug("writing {}mb to {}", numRows, region);
     LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(1024 * 1024, 1024 * 1024);
     for (int i = 0; i < numRows; ++i) {
       byte[] key = Bytes.add(region.getRegionInfo().getStartKey(), Bytes.toBytes(i));
@@ -513,6 +498,46 @@ public class TestSimpleRegionNormalizerOnCluster {
     return 5;
   }
 
+  /**
+   * Create a table with 4 regions, having region sizes so as to provoke a split of the largest
+   * region and a merge of an empty region into the largest.
+   * <ul>
+   *   <li>total table size: 14</li>
+   *   <li>average region size: 3.5</li>
+   * </ul>
+   */
+  private static int createTableTargetOfSplitAndMerge(final TableName tableName) throws Exception {
+    final int[] regionSizesMb = { 10, 0, 2, 2 };
+    final List<HRegion> generatedRegions = generateTestData(tableName, regionSizesMb);
+    assertEquals(4, getRegionCount(tableName));
+    admin.flush(tableName);
+
+    final TableDescriptor td = TableDescriptorBuilder
+      .newBuilder(admin.getDescriptor(tableName))
+      .setNormalizationEnabled(true)
+      .build();
+    admin.modifyTable(td);
+
+    // make sure relatively accurate region statistics are available for the test table. use
+    // the last/largest region as clue.
+    LOG.debug("waiting for region statistics to settle.");
+    TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate<IOException>() {
+      @Override public String explainFailure() {
+        return "expected largest region to be >= 10mb.";
+      }
+      @Override public boolean evaluate() {
+        for (int i = 0; i < generatedRegions.size(); i++) {
+          final RegionInfo regionInfo = generatedRegions.get(i).getRegionInfo();
+          if (!(getRegionSizeMB(master, regionInfo) >= regionSizesMb[i])) {
+            return false;
+          }
+        }
+        return true;
+      }
+    });
+    return 4;
+  }
+
   private static void dropIfExists(final TableName tableName) throws Exception {
     if (tableName != null && admin.tableExists(tableName)) {
       if (admin.isTableEnabled(tableName)) {
@@ -527,4 +552,12 @@ public class TestSimpleRegionNormalizerOnCluster {
       return locator.getAllRegionLocations().size();
     }
   }
+
+  private static int getRegionCountUnchecked(final TableName tableName) {
+    try {
+      return getRegionCount(tableName);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
 }