Posted to commits@hbase.apache.org by zg...@apache.org on 2020/06/08 05:20:21 UTC

[hbase] branch branch-2 updated: HBASE-24359 Optionally ignore edits for deleted CFs for replication (#1855)

This is an automated email from the ASF dual-hosted git repository.

zghao pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new 67fa367  HBASE-24359 Optionally ignore edits for deleted CFs for replication (#1855)
67fa367 is described below

commit 67fa367bb6f1a22534db045b5b6fbce700474733
Author: XinSun <dd...@gmail.com>
AuthorDate: Mon Jun 8 13:14:00 2020 +0800

    HBASE-24359 Optionally ignore edits for deleted CFs for replication (#1855)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |   6 +-
 .../hadoop/hbase/regionserver/wal/WALUtil.java     |  21 ++
 .../HBaseInterClusterReplicationEndpoint.java      | 226 ++++++++++++++-----
 .../replication/TestReplicationDroppedTables.java  |   9 +-
 ...ReplicationEditsDroppedWithDeletedTableCFs.java | 240 ++++++++++++++++++++
 ...estReplicationEditsDroppedWithDroppedTable.java | 249 +++++++++++++++++++++
 .../TestReplicationStuckWithDeletedTableCFs.java   | 183 +++++++++++++++
 .../TestReplicationStuckWithDroppedTable.java      | 176 +++++++++++++++
 ...InterClusterReplicationEndpointFilterEdits.java | 137 ++++++++++++
 9 files changed, 1190 insertions(+), 57 deletions(-)
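
The change introduces two endpoint-level configuration keys in
HBaseInterClusterReplicationEndpoint (see the diff below). As a minimal sketch,
not part of this patch, enabling both behaviors in the source cluster's
configuration might look like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // Drop edits for tables that have been deleted on both source and peer
    conf.setBoolean("hbase.replication.drop.on.deleted.table", true);
    // Drop edits for column families that have been deleted on both source and peer
    conf.setBoolean("hbase.replication.drop.on.deleted.columnfamily", true);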

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index eea169e..622f403 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1339,7 +1339,11 @@ public final class HConstants {
   public static final String REPLICATION_SOURCE_MAXTHREADS_KEY =
       "hbase.replication.source.maxthreads";
 
-  /** Drop edits for tables that been deleted from the replication source and target */
+  /**
+   * Drop edits for tables that have been deleted from the replication source and target
+   * @deprecated Moved to HBaseInterClusterReplicationEndpoint
+   */
+  @Deprecated
   public static final String REPLICATION_DROP_ON_DELETED_TABLE_KEY =
       "hbase.replication.drop.on.deleted.table";
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
index 4679882..19f6b96 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
@@ -22,10 +22,12 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Map;
 import java.util.NavigableMap;
+import java.util.function.Function;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -195,4 +197,23 @@ public class WALUtil {
     }
     return conf.getLong(WAL_BLOCK_SIZE, defaultBlockSize);
   }
+
+  public static void filterCells(WALEdit edit, Function<Cell, Cell> mapper) {
+    ArrayList<Cell> cells = edit.getCells();
+    int size = cells.size();
+    int newSize = 0;
+    for (int i = 0; i < size; i++) {
+      Cell cell = mapper.apply(cells.get(i));
+      if (cell != null) {
+        cells.set(newSize, cell);
+        newSize++;
+      }
+    }
+    for (int i = size - 1; i >= newSize; i--) {
+      cells.remove(i);
+    }
+    if (newSize < size / 2) {
+      cells.trimToSize();
+    }
+  }
 }
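
For context, a minimal usage sketch of the new WALUtil.filterCells helper (not
part of the patch; "edit" and "deletedCf" are hypothetical local variables):

    // Drop, in place, every cell of a deleted family from a WALEdit; cells for
    // which the mapper returns null are removed, all others are kept.
    byte[] deletedCf = Bytes.toBytes("dropped_cf");
    WALUtil.filterCells(edit, cell ->
        CellUtil.matchingFamily(cell, deletedCf) ? null : cell);
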
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 2731de8..9539d30 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -24,8 +24,11 @@ import java.net.SocketTimeoutException;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
@@ -34,40 +37,44 @@ import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
+
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
+import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
 /**
  * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint}
  * implementation for replicating to another HBase cluster.
@@ -86,6 +93,13 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
 
   private static final long DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER = 2;
 
+  /** Drop edits for tables that have been deleted from the replication source and target */
+  public static final String REPLICATION_DROP_ON_DELETED_TABLE_KEY =
+      "hbase.replication.drop.on.deleted.table";
+  /** Drop edits for CFs that have been deleted from the replication source and target */
+  public static final String REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY =
+      "hbase.replication.drop.on.deleted.columnfamily";
+
   private ClusterConnection conn;
   private Configuration localConf;
   private Configuration conf;
@@ -112,6 +126,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
   private boolean replicationBulkLoadDataEnabled;
   private Abortable abortable;
   private boolean dropOnDeletedTables;
+  private boolean dropOnDeletedColumnFamilies;
   private boolean isSerial = false;
 
   /*
@@ -174,7 +189,9 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
     this.replicationRpcLimit = (int)(0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE,
       RpcServer.DEFAULT_MAX_REQUEST_SIZE));
     this.dropOnDeletedTables =
-        this.conf.getBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
+        this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
+    this.dropOnDeletedColumnFamilies = this.conf
+        .getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false);
 
     this.replicationBulkLoadDataEnabled =
         conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
@@ -285,28 +302,148 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
     }
   }
 
-  private TableName parseTable(String msg) {
-    // ... TableNotFoundException: '<table>'/n...
-    Pattern p = Pattern.compile("TableNotFoundException: '([\\S]*)'");
-    Matcher m = p.matcher(msg);
-    if (m.find()) {
-      String table = m.group(1);
-      try {
-        // double check that table is a valid table name
-        TableName.valueOf(TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(table)));
-        return TableName.valueOf(table);
-      } catch (IllegalArgumentException ignore) {
+  /**
+   * Check if there is a {@link TableNotFoundException} in the cause chain of the given exception.
+   */
+  @VisibleForTesting
+  public static boolean isTableNotFoundException(Throwable io) {
+    if (io instanceof RemoteException) {
+      io = ((RemoteException) io).unwrapRemoteException();
+    }
+    if (io != null && io.getMessage().contains("TableNotFoundException")) {
+      return true;
+    }
+    for (; io != null; io = io.getCause()) {
+      if (io instanceof TableNotFoundException) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Check if there is a {@link NoSuchColumnFamilyException} in the cause chain of the given exception.
+   */
+  @VisibleForTesting
+  public static boolean isNoSuchColumnFamilyException(Throwable io) {
+    if (io instanceof RemoteException) {
+      io = ((RemoteException) io).unwrapRemoteException();
+    }
+    if (io != null && io.getMessage().contains("NoSuchColumnFamilyException")) {
+      return true;
+    }
+    for (; io != null; io = io.getCause()) {
+      if (io instanceof NoSuchColumnFamilyException) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @VisibleForTesting
+  List<List<Entry>> filterNotExistTableEdits(final List<List<Entry>> oldEntryList) {
+    List<List<Entry>> entryList = new ArrayList<>();
+    Map<TableName, Boolean> existMap = new HashMap<>();
+    try (Connection localConn = ConnectionFactory.createConnection(ctx.getLocalConfiguration());
+         Admin localAdmin = localConn.getAdmin()) {
+      for (List<Entry> oldEntries : oldEntryList) {
+        List<Entry> entries = new ArrayList<>();
+        for (Entry e : oldEntries) {
+          TableName tableName = e.getKey().getTableName();
+          boolean exist = true;
+          if (existMap.containsKey(tableName)) {
+            exist = existMap.get(tableName);
+          } else {
+            try {
+              exist = localAdmin.tableExists(tableName);
+              existMap.put(tableName, exist);
+            } catch (IOException iox) {
+              LOG.warn("Exception checking for local table " + tableName, iox);
+              // we can't drop edits without full assurance, so we assume the table exists.
+              exist = true;
+            }
+          }
+          if (exist) {
+            entries.add(e);
+          } else {
+            // Would potentially be better to retry in one of the outer loops
+            // and add a table filter there; but that would break the encapsulation,
+            // so we're doing the filtering here.
+            LOG.warn("Missing table detected at sink, local table also does not exist, "
+                + "filtering edits for table '{}'", tableName);
+          }
+        }
+        if (!entries.isEmpty()) {
+          entryList.add(entries);
+        }
       }
+    } catch (IOException iox) {
+      LOG.warn("Exception when creating connection to check local table", iox);
+      return oldEntryList;
     }
-    return null;
+    return entryList;
   }
 
-  // Filter a set of batches by TableName
-  private List<List<Entry>> filterBatches(final List<List<Entry>> oldEntryList, TableName table) {
-    return oldEntryList
-        .stream().map(entries -> entries.stream()
-            .filter(e -> !e.getKey().getTableName().equals(table)).collect(Collectors.toList()))
-        .collect(Collectors.toList());
+  @VisibleForTesting
+  List<List<Entry>> filterNotExistColumnFamilyEdits(final List<List<Entry>> oldEntryList) {
+    List<List<Entry>> entryList = new ArrayList<>();
+    Map<TableName, Set<String>> existColumnFamilyMap = new HashMap<>();
+    try (Connection localConn = ConnectionFactory.createConnection(ctx.getLocalConfiguration());
+         Admin localAdmin = localConn.getAdmin()) {
+      for (List<Entry> oldEntries : oldEntryList) {
+        List<Entry> entries = new ArrayList<>();
+        for (Entry e : oldEntries) {
+          TableName tableName = e.getKey().getTableName();
+          if (!existColumnFamilyMap.containsKey(tableName)) {
+            try {
+              Set<String> cfs = localAdmin.getDescriptor(tableName).getColumnFamilyNames().stream()
+                  .map(Bytes::toString).collect(Collectors.toSet());
+              existColumnFamilyMap.put(tableName, cfs);
+            } catch (Exception ex) {
+              LOG.warn("Exception getting cf names for local table {}", tableName, ex);
+              // If we hit any exception here, we are not sure about the table's descriptor,
+              // so replicate the raw entry
+              entries.add(e);
+              continue;
+            }
+          }
+
+          Set<String> existColumnFamilies = existColumnFamilyMap.get(tableName);
+          Set<String> missingCFs = new HashSet<>();
+          WALEdit walEdit = new WALEdit();
+          walEdit.getCells().addAll(e.getEdit().getCells());
+          WALUtil.filterCells(walEdit, cell -> {
+            String cf = Bytes.toString(CellUtil.cloneFamily(cell));
+            if (existColumnFamilies.contains(cf)) {
+              return cell;
+            } else {
+              missingCFs.add(cf);
+              return null;
+            }
+          });
+          if (!walEdit.isEmpty()) {
+            Entry newEntry = new Entry(e.getKey(), walEdit);
+            entries.add(newEntry);
+          }
+
+          if (!missingCFs.isEmpty()) {
+            // Would potentially be better to retry in one of the outer loops
+            // and add a table filter there; but that would break the encapsulation,
+            // so we're doing the filtering here.
+            LOG.warn(
+                "Missing column family detected at sink, local column family also does not exist,"
+                    + " filtering edits for table '{}', column families '{}'", tableName, missingCFs);
+          }
+        }
+        if (!entries.isEmpty()) {
+          entryList.add(entries);
+        }
+      }
+    } catch (IOException iox) {
+      LOG.warn("Exception when creating connection to check local table", iox);
+      return oldEntryList;
+    }
+    return entryList;
   }
 
   private void reconnectToPeerCluster() {
@@ -403,36 +540,21 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
         return true;
       } catch (IOException ioe) {
         if (ioe instanceof RemoteException) {
-          ioe = ((RemoteException) ioe).unwrapRemoteException();
-          LOG.warn("{} Can't replicate because of an error on the remote cluster: ", logPeerId(),
-            ioe);
-          if (ioe instanceof TableNotFoundException) {
-            if (dropOnDeletedTables) {
-              // this is a bit fragile, but cannot change how TNFE is serialized
-              // at least check whether the table name is legal
-              TableName table = parseTable(ioe.getMessage());
-              if (table != null) {
-                try (Connection localConn =
-                    ConnectionFactory.createConnection(ctx.getLocalConfiguration())) {
-                  if (!localConn.getAdmin().tableExists(table)) {
-                    // Would potentially be better to retry in one of the outer loops
-                    // and add a table filter there; but that would break the encapsulation,
-                    // so we're doing the filtering here.
-                    LOG.info("{} Missing table detected at sink, local table also does not "
-                      + "exist, filtering edits for '{}'", logPeerId(), table);
-                    batches = filterBatches(batches, table);
-                    continue;
-                  }
-                } catch (IOException iox) {
-                  LOG.warn("{} Exception checking for local table: ", logPeerId(), iox);
-                }
-              }
+          if (dropOnDeletedTables && isTableNotFoundException(ioe)) {
+            // Only filter the edits to replicate and don't change the entries in replicateContext
+            // as the upper layer relies on it.
+            batches = filterNotExistTableEdits(batches);
+            if (batches.isEmpty()) {
+              LOG.warn("No edits remain after filtering edits for deleted tables, returning");
+              return true;
+            }
+          } else if (dropOnDeletedColumnFamilies && isNoSuchColumnFamilyException(ioe)) {
+            batches = filterNotExistColumnFamilyEdits(batches);
+            if (batches.isEmpty()) {
+              LOG.warn(
+                  "No edits remain after filtering edits for deleted column families, returning");
+              return true;
             }
-            // fall through and sleep below
-          } else {
-            LOG.warn("{} Peer encountered RemoteException, rechecking all sinks: ", logPeerId(),
-              ioe);
-            replicationSinkMgr.chooseSinks();
           }
         } else {
           if (ioe instanceof SocketTimeoutException) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java
index 2d039b0..872aa0a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import static org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_TABLE_KEY;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
@@ -136,7 +137,7 @@ public class TestReplicationDroppedTables extends TestReplicationBase {
   }
 
   private void testEditsBehindDroppedTable(boolean allowProceeding, String tName) throws Exception {
-    CONF1.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, allowProceeding);
+    CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, allowProceeding);
     CONF1.setInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, 1);
 
     // make sure we have a single region server only, so that all
@@ -205,12 +206,12 @@ public class TestReplicationDroppedTables extends TestReplicationBase {
       verifyReplicationStuck();
     }
     // just to be safe
-    CONF1.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
+    CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
   }
 
   @Test
   public void testEditsBehindDroppedTableTiming() throws Exception {
-    CONF1.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, true);
+    CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, true);
     CONF1.setInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, 1);
 
     // make sure we have a single region server only, so that all
@@ -281,7 +282,7 @@ public class TestReplicationDroppedTables extends TestReplicationBase {
       verifyReplicationProceeded();
     }
     // just to be safe
-    CONF1.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
+    CONF1.setBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
   }
 
   private boolean peerHasAllNormalRows() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java
new file mode 100644
index 0000000..55ef825
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java
@@ -0,0 +1,240 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_GLOBAL;
+import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
+import static org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter.Predicate;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ LargeTests.class })
+public class TestReplicationEditsDroppedWithDeletedTableCFs {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestReplicationEditsDroppedWithDeletedTableCFs.class);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestReplicationEditsDroppedWithDeletedTableCFs.class);
+
+  private static Configuration conf1 = HBaseConfiguration.create();
+  private static Configuration conf2 = HBaseConfiguration.create();
+
+  protected static HBaseTestingUtility utility1;
+  protected static HBaseTestingUtility utility2;
+
+  private static Admin admin1;
+  private static Admin admin2;
+
+  private static final TableName TABLE = TableName.valueOf("table");
+  private static final byte[] NORMAL_CF = Bytes.toBytes("normal_cf");
+  private static final byte[] DROPPED_CF = Bytes.toBytes("dropped_cf");
+
+  private static final byte[] ROW = Bytes.toBytes("row");
+  private static final byte[] QUALIFIER = Bytes.toBytes("q");
+  private static final byte[] VALUE = Bytes.toBytes("value");
+
+  private static final String PEER_ID = "1";
+  private static final long SLEEP_TIME = 1000;
+  private static final int NB_RETRIES = 10;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // Set to true to filter replication edits for deleted column families
+    conf1.setBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, true);
+    conf1.set(ZOOKEEPER_ZNODE_PARENT, "/1");
+    conf1.setInt("replication.source.nb.capacity", 1);
+    utility1 = new HBaseTestingUtility(conf1);
+    utility1.startMiniZKCluster();
+    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
+    conf1 = utility1.getConfiguration();
+
+    conf2 = HBaseConfiguration.create(conf1);
+    conf2.set(ZOOKEEPER_ZNODE_PARENT, "/2");
+    utility2 = new HBaseTestingUtility(conf2);
+    utility2.setZkCluster(miniZK);
+
+    utility1.startMiniCluster(1);
+    utility2.startMiniCluster(1);
+
+    admin1 = utility1.getAdmin();
+    admin2 = utility2.getAdmin();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    utility2.shutdownMiniCluster();
+    utility1.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    // Roll log
+    for (JVMClusterUtil.RegionServerThread r : utility1.getHBaseCluster()
+        .getRegionServerThreads()) {
+      utility1.getAdmin().rollWALWriter(r.getRegionServer().getServerName());
+    }
+    // add peer
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+        .setClusterKey(utility2.getClusterKey())
+        .setReplicateAllUserTables(true).build();
+    admin1.addReplicationPeer(PEER_ID, rpc);
+    // create table
+    createTable();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    // Remove peer
+    admin1.removeReplicationPeer(PEER_ID);
+    // Drop table
+    admin1.disableTable(TABLE);
+    admin1.deleteTable(TABLE);
+    admin2.disableTable(TABLE);
+    admin2.deleteTable(TABLE);
+  }
+
+  private void createTable() throws Exception {
+    TableDescriptor desc = createTableDescriptor(NORMAL_CF, DROPPED_CF);
+    admin1.createTable(desc);
+    admin2.createTable(desc);
+    utility1.waitUntilAllRegionsAssigned(desc.getTableName());
+    utility2.waitUntilAllRegionsAssigned(desc.getTableName());
+  }
+
+  @Test
+  public void testEditsDroppedWithDeleteCF() throws Exception {
+    admin1.disableReplicationPeer(PEER_ID);
+
+    try (Table table = utility1.getConnection().getTable(TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(DROPPED_CF, QUALIFIER, VALUE);
+      table.put(put);
+    }
+
+    deleteCf(admin1);
+    deleteCf(admin2);
+
+    admin1.enableReplicationPeer(PEER_ID);
+
+    verifyReplicationProceeded();
+  }
+
+  @Test
+  public void testEditsBehindDeleteCFTiming() throws Exception {
+    admin1.disableReplicationPeer(PEER_ID);
+
+    try (Table table = utility1.getConnection().getTable(TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(DROPPED_CF, QUALIFIER, VALUE);
+      table.put(put);
+    }
+
+    // Only delete cf from peer cluster
+    deleteCf(admin2);
+
+    admin1.enableReplicationPeer(PEER_ID);
+
+    // the source table's cf still exists, replication should be stalled
+    verifyReplicationStuck();
+    deleteCf(admin1);
+    // now the source table's cf is gone, replication should proceed, the
+    // offending edits be dropped
+    verifyReplicationProceeded();
+  }
+
+  private void verifyReplicationProceeded() throws Exception {
+    try (Table table = utility1.getConnection().getTable(TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(NORMAL_CF, QUALIFIER, VALUE);
+      table.put(put);
+    }
+    utility2.waitFor(NB_RETRIES * SLEEP_TIME, (Predicate<Exception>) () -> {
+      try (Table peerTable = utility2.getConnection().getTable(TABLE)) {
+        Result result = peerTable.get(new Get(ROW).addColumn(NORMAL_CF, QUALIFIER));
+        return result != null && !result.isEmpty()
+            && Bytes.equals(VALUE, result.getValue(NORMAL_CF, QUALIFIER));
+      }
+    });
+  }
+
+  private void verifyReplicationStuck() throws Exception {
+    try (Table table = utility1.getConnection().getTable(TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(NORMAL_CF, QUALIFIER, VALUE);
+      table.put(put);
+    }
+    try (Table peerTable = utility2.getConnection().getTable(TABLE)) {
+      for (int i = 0; i < NB_RETRIES; i++) {
+        Result result = peerTable.get(new Get(ROW).addColumn(NORMAL_CF, QUALIFIER));
+        if (result != null && !result.isEmpty()) {
+          fail("Edit should have been stuck behind the deleted column family, but value is " + Bytes
+              .toString(result.getValue(NORMAL_CF, QUALIFIER)));
+        } else {
+          LOG.info("Row not replicated, let's wait a bit more...");
+          Thread.sleep(SLEEP_TIME);
+        }
+      }
+    }
+  }
+
+  private TableDescriptor createTableDescriptor(byte[]... cfs) {
+    return TableDescriptorBuilder.newBuilder(TABLE)
+        .setColumnFamilies(Arrays.stream(cfs).map(cf ->
+            ColumnFamilyDescriptorBuilder.newBuilder(cf).setScope(REPLICATION_SCOPE_GLOBAL).build())
+            .collect(Collectors.toList())
+        ).build();
+  }
+
+  private void deleteCf(Admin admin) throws IOException {
+    TableDescriptor desc = createTableDescriptor(NORMAL_CF);
+    admin.modifyTable(desc);
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java
new file mode 100644
index 0000000..56eecac
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter.Predicate;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ LargeTests.class })
+public class TestReplicationEditsDroppedWithDroppedTable {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestReplicationEditsDroppedWithDroppedTable.class);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestReplicationEditsDroppedWithDroppedTable.class);
+
+  private static Configuration conf1 = HBaseConfiguration.create();
+  private static Configuration conf2 = HBaseConfiguration.create();
+
+  protected static HBaseTestingUtility utility1;
+  protected static HBaseTestingUtility utility2;
+
+  private static Admin admin1;
+  private static Admin admin2;
+
+  private static final String namespace = "NS";
+  private static final TableName NORMAL_TABLE = TableName.valueOf("normal-table");
+  private static final TableName DROPPED_TABLE = TableName.valueOf("dropped-table");
+  private static final TableName DROPPED_NS_TABLE = TableName.valueOf("NS:dropped-table");
+  private static final byte[] ROW = Bytes.toBytes("row");
+  private static final byte[] FAMILY = Bytes.toBytes("f");
+  private static final byte[] QUALIFIER = Bytes.toBytes("q");
+  private static final byte[] VALUE = Bytes.toBytes("value");
+
+  private static final String PEER_ID = "1";
+  private static final long SLEEP_TIME = 1000;
+  private static final int NB_RETRIES = 10;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // Set to true to filter replication edits for dropped tables
+    conf1.setBoolean(HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_TABLE_KEY,
+        true);
+    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+    conf1.setInt("replication.source.nb.capacity", 1);
+    utility1 = new HBaseTestingUtility(conf1);
+    utility1.startMiniZKCluster();
+    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
+    conf1 = utility1.getConfiguration();
+
+    conf2 = HBaseConfiguration.create(conf1);
+    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+    utility2 = new HBaseTestingUtility(conf2);
+    utility2.setZkCluster(miniZK);
+
+    utility1.startMiniCluster(1);
+    utility2.startMiniCluster(1);
+
+    admin1 = utility1.getAdmin();
+    admin2 = utility2.getAdmin();
+
+    NamespaceDescriptor nsDesc = NamespaceDescriptor.create(namespace).build();
+    admin1.createNamespace(nsDesc);
+    admin2.createNamespace(nsDesc);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    utility2.shutdownMiniCluster();
+    utility1.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    // Roll log
+    for (JVMClusterUtil.RegionServerThread r : utility1.getHBaseCluster()
+        .getRegionServerThreads()) {
+      utility1.getAdmin().rollWALWriter(r.getRegionServer().getServerName());
+    }
+    // add peer
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+        .setClusterKey(utility2.getClusterKey())
+        .setReplicateAllUserTables(true).build();
+    admin1.addReplicationPeer(PEER_ID, rpc);
+    // create table
+    createTable(NORMAL_TABLE);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    // Remove peer
+    admin1.removeReplicationPeer(PEER_ID);
+    // Drop table
+    admin1.disableTable(NORMAL_TABLE);
+    admin1.deleteTable(NORMAL_TABLE);
+    admin2.disableTable(NORMAL_TABLE);
+    admin2.deleteTable(NORMAL_TABLE);
+  }
+
+  private void createTable(TableName tableName) throws Exception {
+    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(
+        ColumnFamilyDescriptorBuilder.newBuilder(FAMILY)
+            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()
+    ).build();
+    admin1.createTable(desc);
+    admin2.createTable(desc);
+    utility1.waitUntilAllRegionsAssigned(tableName);
+    utility2.waitUntilAllRegionsAssigned(tableName);
+  }
+
+  @Test
+  public void testEditsDroppedWithDroppedTable() throws Exception {
+    testWithDroppedTable(DROPPED_TABLE);
+  }
+
+  @Test
+  public void testEditsDroppedWithDroppedTableNS() throws Exception {
+    testWithDroppedTable(DROPPED_NS_TABLE);
+  }
+
+  private void testWithDroppedTable(TableName droppedTableName) throws Exception {
+    createTable(droppedTableName);
+    admin1.disableReplicationPeer(PEER_ID);
+
+    try (Table droppedTable = utility1.getConnection().getTable(droppedTableName)) {
+      Put put = new Put(ROW);
+      put.addColumn(FAMILY, QUALIFIER, VALUE);
+      droppedTable.put(put);
+    }
+
+    admin1.disableTable(droppedTableName);
+    admin1.deleteTable(droppedTableName);
+    admin2.disableTable(droppedTableName);
+    admin2.deleteTable(droppedTableName);
+
+    admin1.enableReplicationPeer(PEER_ID);
+
+    verifyReplicationProceeded();
+  }
+
+  @Test
+  public void testEditsBehindDroppedTableTiming() throws Exception {
+    createTable(DROPPED_TABLE);
+    admin1.disableReplicationPeer(PEER_ID);
+
+    try (Table droppedTable = utility1.getConnection().getTable(DROPPED_TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(FAMILY, QUALIFIER, VALUE);
+      droppedTable.put(put);
+    }
+
+    // Only delete table from peer cluster
+    admin2.disableTable(DROPPED_TABLE);
+    admin2.deleteTable(DROPPED_TABLE);
+
+    admin1.enableReplicationPeer(PEER_ID);
+
+    // the source table still exists, replication should be stalled
+    verifyReplicationStuck();
+    admin1.disableTable(DROPPED_TABLE);
+    // still stuck, source table still exists
+    verifyReplicationStuck();
+    admin1.deleteTable(DROPPED_TABLE);
+    // now the source table is gone, replication should proceed, the
+    // offending edits be dropped
+    verifyReplicationProceeded();
+  }
+
+  private void verifyReplicationProceeded() throws Exception {
+    try (Table normalTable = utility1.getConnection().getTable(NORMAL_TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(FAMILY, QUALIFIER, VALUE);
+      normalTable.put(put);
+    }
+    utility2.waitFor(NB_RETRIES * SLEEP_TIME, (Predicate<Exception>) () -> {
+      try (Table normalTable = utility2.getConnection().getTable(NORMAL_TABLE)) {
+        Result result = normalTable.get(new Get(ROW).addColumn(FAMILY, QUALIFIER));
+        return result != null && !result.isEmpty()
+            && Bytes.equals(VALUE, result.getValue(FAMILY, QUALIFIER));
+      }
+    });
+  }
+
+  private void verifyReplicationStuck() throws Exception {
+    try (Table normalTable = utility1.getConnection().getTable(NORMAL_TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(FAMILY, QUALIFIER, VALUE);
+      normalTable.put(put);
+    }
+    try (Table normalTable = utility2.getConnection().getTable(NORMAL_TABLE)) {
+      for (int i = 0; i < NB_RETRIES; i++) {
+        Result result = normalTable.get(new Get(ROW).addColumn(FAMILY, QUALIFIER));
+        if (result != null && !result.isEmpty()) {
+          fail("Edit should have been stuck behind dropped tables, but value is " + Bytes
+              .toString(result.getValue(FAMILY, QUALIFIER)));
+        } else {
+          LOG.info("Row not replicated, let's wait a bit more...");
+          Thread.sleep(SLEEP_TIME);
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java
new file mode 100644
index 0000000..c6491e5
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_GLOBAL;
+import static org.junit.Assert.fail;
+
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Replication will get stuck when a column family is deleted, as the default
+ * REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY is false.
+ */
+@Category({ LargeTests.class })
+public class TestReplicationStuckWithDeletedTableCFs {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestReplicationStuckWithDeletedTableCFs.class);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestReplicationStuckWithDeletedTableCFs.class);
+
+  private static Configuration conf1 = HBaseConfiguration.create();
+  private static Configuration conf2 = HBaseConfiguration.create();
+
+  protected static HBaseTestingUtility utility1;
+  protected static HBaseTestingUtility utility2;
+
+  private static Admin admin1;
+  private static Admin admin2;
+
+  private static final TableName TABLE = TableName.valueOf("normal-table");
+  private static final byte[] ROW = Bytes.toBytes("row");
+  private static final byte[] NORMAL_FAMILY = Bytes.toBytes("nf");
+  private static final byte[] DROPPED_FAMILY = Bytes.toBytes("df");
+  private static final byte[] QUALIFIER = Bytes.toBytes("q");
+  private static final byte[] VALUE = Bytes.toBytes("value");
+
+  private static final String PEER_ID = "1";
+  private static final long SLEEP_TIME = 1000;
+  private static final int NB_RETRIES = 10;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+    conf1.setInt("replication.source.nb.capacity", 1);
+    utility1 = new HBaseTestingUtility(conf1);
+    utility1.startMiniZKCluster();
+    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
+    conf1 = utility1.getConfiguration();
+
+    conf2 = HBaseConfiguration.create(conf1);
+    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+    utility2 = new HBaseTestingUtility(conf2);
+    utility2.setZkCluster(miniZK);
+
+    utility1.startMiniCluster(1);
+    utility2.startMiniCluster(1);
+
+    admin1 = utility1.getAdmin();
+    admin2 = utility2.getAdmin();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    utility2.shutdownMiniCluster();
+    utility1.shutdownMiniCluster();
+  }
+
+  private void createTable(TableName tableName) throws Exception {
+    TableDescriptor desc = createTableDescriptor(DROPPED_FAMILY, NORMAL_FAMILY);
+    admin1.createTable(desc);
+    admin2.createTable(desc);
+    utility1.waitUntilAllRegionsAssigned(tableName);
+    utility2.waitUntilAllRegionsAssigned(tableName);
+  }
+
+  @Test
+  public void testEditsStuckBehindDeletedCFs() throws Exception {
+    // add peer
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+        .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build();
+    admin1.addReplicationPeer(PEER_ID, rpc);
+
+    // create table
+    createTable(TABLE);
+
+    admin1.disableReplicationPeer(PEER_ID);
+
+    try (Table droppedTable = utility1.getConnection().getTable(TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(DROPPED_FAMILY, QUALIFIER, VALUE).addColumn(NORMAL_FAMILY, QUALIFIER, VALUE);
+      droppedTable.put(put);
+    }
+
+    // delete cf
+    TableDescriptor desc = createTableDescriptor(NORMAL_FAMILY);
+    admin1.modifyTable(desc);
+    admin2.modifyTable(desc);
+
+    admin1.enableReplicationPeer(PEER_ID);
+
+    verifyReplicationStuck();
+
+    // Remove peer
+    admin1.removeReplicationPeer(PEER_ID);
+    // Drop table
+    admin1.disableTable(TABLE);
+    admin1.deleteTable(TABLE);
+    admin2.disableTable(TABLE);
+    admin2.deleteTable(TABLE);
+  }
+
+  private void verifyReplicationStuck() throws Exception {
+    try (Table normalTable = utility1.getConnection().getTable(TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(NORMAL_FAMILY, QUALIFIER, VALUE);
+      normalTable.put(put);
+    }
+    try (Table normalTable = utility2.getConnection().getTable(TABLE)) {
+      for (int i = 0; i < NB_RETRIES; i++) {
+        Result result = normalTable.get(new Get(ROW).addColumn(NORMAL_FAMILY, QUALIFIER));
+        if (result != null && !result.isEmpty()) {
+          fail("Edit should have been stuck behind the deleted column family, but value is " + Bytes
+              .toString(result.getValue(NORMAL_FAMILY, QUALIFIER)));
+        } else {
+          LOG.info("Row not replicated, let's wait a bit more...");
+          Thread.sleep(SLEEP_TIME);
+        }
+      }
+    }
+  }
+
+  private TableDescriptor createTableDescriptor(byte[]... cfs) {
+    return TableDescriptorBuilder.newBuilder(TABLE)
+        .setColumnFamilies(Arrays.stream(cfs).map(cf ->
+            ColumnFamilyDescriptorBuilder.newBuilder(cf).setScope(REPLICATION_SCOPE_GLOBAL).build())
+            .collect(Collectors.toList())
+        ).build();
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java
new file mode 100644
index 0000000..cebf7d8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_GLOBAL;
+import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Replication with a dropped table will get stuck, as the default
+ * REPLICATION_DROP_ON_DELETED_TABLE_KEY is false.
+ */
+@Category({ LargeTests.class })
+public class TestReplicationStuckWithDroppedTable {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestReplicationStuckWithDroppedTable.class);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestReplicationStuckWithDroppedTable.class);
+
+  private static Configuration conf1 = HBaseConfiguration.create();
+  private static Configuration conf2 = HBaseConfiguration.create();
+
+  protected static HBaseTestingUtility utility1;
+  protected static HBaseTestingUtility utility2;
+
+  private static Admin admin1;
+  private static Admin admin2;
+
+  private static final TableName NORMAL_TABLE = TableName.valueOf("normal-table");
+  private static final TableName DROPPED_TABLE = TableName.valueOf("dropped-table");
+  private static final byte[] ROW = Bytes.toBytes("row");
+  private static final byte[] FAMILY = Bytes.toBytes("f");
+  private static final byte[] QUALIFIER = Bytes.toBytes("q");
+  private static final byte[] VALUE = Bytes.toBytes("value");
+
+  private static final String PEER_ID = "1";
+  private static final long SLEEP_TIME = 1000;
+  private static final int NB_RETRIES = 10;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    conf1.set(ZOOKEEPER_ZNODE_PARENT, "/1");
+    conf1.setInt("replication.source.nb.capacity", 1);
+    utility1 = new HBaseTestingUtility(conf1);
+    utility1.startMiniZKCluster();
+    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
+    conf1 = utility1.getConfiguration();
+
+    conf2 = HBaseConfiguration.create(conf1);
+    conf2.set(ZOOKEEPER_ZNODE_PARENT, "/2");
+    utility2 = new HBaseTestingUtility(conf2);
+    utility2.setZkCluster(miniZK);
+
+    utility1.startMiniCluster(1);
+    utility2.startMiniCluster(1);
+
+    admin1 = utility1.getAdmin();
+    admin2 = utility2.getAdmin();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    utility2.shutdownMiniCluster();
+    utility1.shutdownMiniCluster();
+  }
+
+  private void createTable(TableName tableName) throws Exception {
+    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(
+        ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setScope(REPLICATION_SCOPE_GLOBAL).build()
+    ).build();
+    admin1.createTable(desc);
+    admin2.createTable(desc);
+    utility1.waitUntilAllRegionsAssigned(tableName);
+    utility2.waitUntilAllRegionsAssigned(tableName);
+  }
+
+  @Test
+  public void testEditsStuckBehindDroppedTable() throws Exception {
+    // add peer
+    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
+        .setClusterKey(utility2.getClusterKey())
+        .setReplicateAllUserTables(true).build();
+    admin1.addReplicationPeer(PEER_ID, rpc);
+
+    // create table
+    createTable(NORMAL_TABLE);
+    createTable(DROPPED_TABLE);
+
+    admin1.disableReplicationPeer(PEER_ID);
+
+    try (Table droppedTable = utility1.getConnection().getTable(DROPPED_TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(FAMILY, QUALIFIER, VALUE);
+      droppedTable.put(put);
+    }
+
+    admin1.disableTable(DROPPED_TABLE);
+    admin1.deleteTable(DROPPED_TABLE);
+    admin2.disableTable(DROPPED_TABLE);
+    admin2.deleteTable(DROPPED_TABLE);
+
+    admin1.enableReplicationPeer(PEER_ID);
+
+    verifyReplicationStuck();
+
+    // Remove peer
+    admin1.removeReplicationPeer(PEER_ID);
+    // Drop table
+    admin1.disableTable(NORMAL_TABLE);
+    admin1.deleteTable(NORMAL_TABLE);
+    admin2.disableTable(NORMAL_TABLE);
+    admin2.deleteTable(NORMAL_TABLE);
+  }
+
+  private void verifyReplicationStuck() throws Exception {
+    try (Table normalTable = utility1.getConnection().getTable(NORMAL_TABLE)) {
+      Put put = new Put(ROW);
+      put.addColumn(FAMILY, QUALIFIER, VALUE);
+      normalTable.put(put);
+    }
+    try (Table normalTable = utility2.getConnection().getTable(NORMAL_TABLE)) {
+      for (int i = 0; i < NB_RETRIES; i++) {
+        Result result = normalTable.get(new Get(ROW).addColumn(FAMILY, QUALIFIER));
+        if (result != null && !result.isEmpty()) {
+          fail("Edit should have been stuck behind dropped tables, but value is " + Bytes
+              .toString(result.getValue(FAMILY, QUALIFIER)));
+        } else {
+          LOG.info("Row not replicated, let's wait a bit more...");
+          Thread.sleep(SLEEP_TIME);
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java
new file mode 100644
index 0000000..2940b0d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.replication.ReplicationEndpoint.Context;
+import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKeyImpl;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+/**
+ * Tests {@link HBaseInterClusterReplicationEndpoint#filterNotExistColumnFamilyEdits(List)} and
+ * {@link HBaseInterClusterReplicationEndpoint#filterNotExistTableEdits(List)}
+ */
+@Category({ ReplicationTests.class, MediumTests.class })
+public class TestHBaseInterClusterReplicationEndpointFilterEdits {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestHBaseInterClusterReplicationEndpointFilterEdits.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static HBaseInterClusterReplicationEndpoint endpoint;
+
+  private static final TableName TABLE1 = TableName.valueOf("T1");
+  private static final TableName TABLE2 = TableName.valueOf("T2");
+
+  private static final byte[] FAMILY = Bytes.toBytes("CF");
+  private static final byte[] NON_EXISTING_FAMILY = Bytes.toBytes("NECF");
+  private static final byte[] QUALIFIER = Bytes.toBytes("Q");
+  private static final byte[] ROW = Bytes.toBytes("r");
+  private static final byte[] VALUE = Bytes.toBytes("v");
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    UTIL.startMiniCluster();
+    ReplicationPeer replicationPeer = mock(ReplicationPeer.class);
+    ReplicationPeerConfig rpc = mock(ReplicationPeerConfig.class);
+    when(rpc.isSerial()).thenReturn(false);
+    when(replicationPeer.getPeerConfig()).thenReturn(rpc);
+    Context context = new Context(UTIL.getConfiguration(), UTIL.getConfiguration(), null,
+        null, null, replicationPeer, null, null, null);
+    endpoint = new HBaseInterClusterReplicationEndpoint();
+    endpoint.init(context);
+
+    UTIL.createTable(TABLE1, FAMILY);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testFilterNotExistColumnFamilyEdits() {
+    List<List<Entry>> entryList = new ArrayList<>();
+    // should be filtered
+    Cell c1 = new KeyValue(ROW, NON_EXISTING_FAMILY, QUALIFIER, System.currentTimeMillis(),
+        Type.Put, VALUE);
+    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE1, System.currentTimeMillis()),
+        new WALEdit().add(c1));
+    entryList.add(Lists.newArrayList(e1));
+    // should be kept
+    Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, System.currentTimeMillis(), Type.Put, VALUE);
+    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, System.currentTimeMillis()),
+        new WALEdit().add(c2));
+    entryList.add(Lists.newArrayList(e2, e1));
+    List<List<Entry>> filtered = endpoint.filterNotExistColumnFamilyEdits(entryList);
+    assertEquals(1, filtered.size());
+    assertEquals(1, filtered.get(0).get(0).getEdit().getCells().size());
+    Cell cell = filtered.get(0).get(0).getEdit().getCells().get(0);
+    assertTrue(CellUtil.matchingFamily(cell, FAMILY));
+  }
+
+  @Test
+  public void testFilterNotExistTableEdits() {
+    List<List<Entry>> entryList = new ArrayList<>();
+    // should be filtered
+    Cell c1 = new KeyValue(ROW, FAMILY, QUALIFIER, System.currentTimeMillis(), Type.Put, VALUE);
+    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE2, System.currentTimeMillis()),
+        new WALEdit().add(c1));
+    entryList.add(Lists.newArrayList(e1));
+    // should be kept
+    Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, System.currentTimeMillis(), Type.Put, VALUE);
+    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, System.currentTimeMillis()),
+        new WALEdit().add(c2));
+    entryList.add(Lists.newArrayList(e2));
+    List<List<Entry>> filtered = endpoint.filterNotExistTableEdits(entryList);
+    assertEquals(1, filtered.size());
+    Entry entry = filtered.get(0).get(0);
+    assertEquals(1, entry.getEdit().getCells().size());
+    assertEquals(TABLE1, entry.getKey().getTableName());
+  }
+
+}