Posted to commits@hbase.apache.org by ap...@apache.org on 2019/06/26 01:29:40 UTC

[hbase] branch branch-1.4 updated (4796608c -> e94908c)

This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a change to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git.


    from 4796608c HBASE-22616 responseTooXXX logging for Multi should characterize the component ops
     new 07288fa  HBASE-22627 Port HBASE-22617 (Recovered WAL directories not getting cleaned up) to branch-1 (#339)
     new e94908c  HBASE-22629 Remove TestReplicationDroppedTables from branch-1

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../apache/hadoop/hbase/backup/HFileArchiver.java  |   4 +-
 .../java/org/apache/hadoop/hbase/io/HFileLink.java |   3 +-
 .../hadoop/hbase/master/MasterFileSystem.java      |   4 +-
 .../master/procedure/DeleteTableProcedure.java     |   3 +-
 .../hadoop/hbase/migration/NamespaceUpgrade.java   |   3 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 112 ++++----
 .../hbase/regionserver/HRegionFileSystem.java      |  13 +-
 .../hbase/regionserver/SplitTransactionImpl.java   |   2 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java      |   5 +-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  51 +++-
 .../apache/hadoop/hbase/util/HFileArchiveUtil.java |  15 +-
 .../org/apache/hadoop/hbase/wal/WALSplitter.java   |   7 +-
 .../hadoop/hbase/backup/TestHFileArchiving.java    |   4 +-
 .../org/apache/hadoop/hbase/io/TestHFileLink.java  |   6 +-
 .../hbase/master/TestDistributedLogSplitting.java  |   7 +-
 .../regionserver/TestRegionMergeTransaction.java   |   2 +-
 .../hbase/regionserver/TestSplitTransaction.java   |   6 +-
 .../replication/TestReplicationDroppedTables.java  | 292 ---------------------
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java  |   3 +-
 19 files changed, 132 insertions(+), 410 deletions(-)
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java


[hbase] 01/02: HBASE-22627 Port HBASE-22617 (Recovered WAL directories not getting cleaned up) to branch-1 (#339)

Posted by ap...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 07288fa4b15b875f0b5de2fbde4fc4772ce06a2c
Author: Andrew Purtell <ap...@apache.org>
AuthorDate: Tue Jun 25 18:14:01 2019 -0700

    HBASE-22627 Port HBASE-22617 (Recovered WAL directories not getting cleaned up) to branch-1 (#339)
    
    HBASE-22617 Recovered WAL directories not getting cleaned up (Duo Zhang)
    
    Signed-off-by: Zach York <zy...@apache.org>
---
 .../apache/hadoop/hbase/backup/HFileArchiver.java  |   4 +-
 .../java/org/apache/hadoop/hbase/io/HFileLink.java |   3 +-
 .../hadoop/hbase/master/MasterFileSystem.java      |   4 +-
 .../master/procedure/DeleteTableProcedure.java     |   3 +-
 .../hadoop/hbase/migration/NamespaceUpgrade.java   |   3 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 112 +++++++++------------
 .../hbase/regionserver/HRegionFileSystem.java      |  13 ++-
 .../hbase/regionserver/SplitTransactionImpl.java   |   2 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java      |   5 +-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  51 ++++++++--
 .../apache/hadoop/hbase/util/HFileArchiveUtil.java |  15 ++-
 .../org/apache/hadoop/hbase/wal/WALSplitter.java   |   7 +-
 .../hadoop/hbase/backup/TestHFileArchiving.java    |   4 +-
 .../org/apache/hadoop/hbase/io/TestHFileLink.java  |   6 +-
 .../hbase/master/TestDistributedLogSplitting.java  |   7 +-
 .../regionserver/TestRegionMergeTransaction.java   |   2 +-
 .../hbase/regionserver/TestSplitTransaction.java   |   6 +-
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java  |   3 +-
 18 files changed, 132 insertions(+), 118 deletions(-)
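
For orientation before the file-by-file diff: per the code comments in this patch, HBASE-20734 stored recovered edits in a WAL region directory that omits the base namespace directory ("data"), and those directories were not getting cleaned up (HBASE-22617). The change below fixes the path construction, makes replay also read edits left in the wrong location, and deletes that location afterwards. A minimal, purely illustrative sketch of the two layouts, mirroring FSUtils.getWALRegionDir and the new FSUtils.getWrongWALRegionDir further down; the helper class here is not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSUtils;

    // Illustrative only; mirrors the two directory layouts handled by this patch.
    final class RecoveredEditsLayouts {
      // Correct layout: <walRootDir>/data/<namespace>/<table>/<encodedRegionName>
      static Path correctWALRegionDir(Configuration conf, TableName tn, String encodedName)
          throws IOException {
        Path base = new Path(FSUtils.getWALRootDir(conf), HConstants.BASE_NAMESPACE_DIR);
        return new Path(new Path(new Path(base, tn.getNamespaceAsString()),
            tn.getQualifierAsString()), encodedName);
      }

      // Wrong layout from HBASE-20734: <walRootDir>/<namespace>/<table>/<encodedRegionName>.
      // FSUtils.getWrongWALRegionDir reproduces it only so replay can drain and delete it.
      static Path wrongWALRegionDir(Configuration conf, TableName tn, String encodedName)
          throws IOException {
        return new Path(new Path(new Path(FSUtils.getWALRootDir(conf),
            tn.getNamespaceAsString()), tn.getQualifierAsString()), encodedName);
      }
    }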

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index 1231173..63c8e08 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -32,10 +32,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -85,7 +83,7 @@ public class HFileArchiver {
       throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
     archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()),
-      HRegion.getRegionDir(rootDir, info));
+      FSUtils.getRegionDirFromRootDir(rootDir, info));
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index c2a25e5..c724dda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -486,7 +485,7 @@ public class HFileLink extends FileLink {
     String linkName = createHFileLinkName(FSUtils.getTableName(tablePath),
             regionPath.getName(), hfileName);
     Path linkTableDir = FSUtils.getTableDir(rootDir, linkTableName);
-    Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
+    Path regionDir = new Path(linkTableDir, linkRegionName);
     return new Path(new Path(regionDir, familyPath.getName()), linkName);
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 28e5801..004627f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -212,7 +212,7 @@ public class MasterFileSystem {
     return this.walFsOk;
   }
 
-  protected FileSystem getWALFileSystem() {
+  public FileSystem getWALFileSystem() {
     return this.walFs;
   }
 
@@ -689,6 +689,4 @@ public class MasterFileSystem {
       LOG.warn("Failed archiving meta log for server " + serverName, ie);
     }
   }
-
-
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 6d27b46..7fdd1a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -336,7 +335,7 @@ public class DeleteTableProcedure
       for (HRegionInfo hri : regions) {
         LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
         HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
-            tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
+            tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
       }
       LOG.debug("Table '" + tableName + "' archived!");
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index 034aac2..6da05cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -319,8 +319,7 @@ public class NamespaceUpgrade implements Tool {
     }
 
     // Since meta table name has changed rename meta region dir from it's old encoding to new one
-    Path oldMetaRegionDir = HRegion.getRegionDir(rootDir,
-      new Path(newMetaDir, "1028785192").toString());
+    Path oldMetaRegionDir = new Path(rootDir, new Path(newMetaDir, "1028785192").toString());
     if (fs.exists(oldMetaRegionDir)) {
       LOG.info("Migrating meta region " + oldMetaRegionDir + " to " + newMetaRegionDir);
       if (!fs.rename(oldMetaRegionDir, newMetaRegionDir)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6e2fb19..2102de8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -21,8 +21,10 @@ package org.apache.hadoop.hbase.regionserver;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 import com.google.common.io.Closeables;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.Descriptors;
@@ -4163,7 +4165,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     if (nonExistentList != null) {
       for (byte[] family : nonExistentList) {
         // Perhaps schema was changed between crash and replay
-        LOG.info("No family for " + Bytes.toString(family) + " omit from reply.");
+        LOG.info("No family for " + Bytes.toString(family) + " omit from replay.");
         familyMap.remove(family);
       }
     }
@@ -4276,54 +4278,58 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         minSeqIdForTheRegion = maxSeqIdInStore;
       }
     }
-    long seqid = minSeqIdForTheRegion;
+    long seqId = minSeqIdForTheRegion;
 
     FileSystem walFS = getWalFileSystem();
-    Path regionDir = getWALRegionDir();
     FileSystem rootFS = getFilesystem();
-    Path defaultRegionDir = getRegionDir(FSUtils.getRootDir(conf), getRegionInfo());
+    Path regionDir = FSUtils.getRegionDirFromRootDir(FSUtils.getRootDir(conf), getRegionInfo());
+    Path regionWALDir = getWALRegionDir();
+    Path wrongRegionWALDir = FSUtils.getWrongWALRegionDir(conf, getRegionInfo().getTable(),
+      getRegionInfo().getEncodedName());
 
+    // We made a mistake in HBASE-20734 so we need to do this dirty hack...
+    NavigableSet<Path> filesUnderWrongRegionWALDir =
+      WALSplitter.getSplitEditFilesSorted(walFS, wrongRegionWALDir);
+    seqId = Math.max(seqId, replayRecoveredEditsForPaths(minSeqIdForTheRegion, walFS,
+      filesUnderWrongRegionWALDir, reporter, regionDir));
     // This is to ensure backwards compatability with HBASE-20723 where recovered edits can appear
     // under the root dir even if walDir is set.
-    NavigableSet<Path> filesUnderRootDir = null;
-    if (!regionDir.equals(defaultRegionDir)) {
-      filesUnderRootDir =
-          WALSplitter.getSplitEditFilesSorted(rootFS, defaultRegionDir);
-      seqid = Math.max(seqid,
-          replayRecoveredEditsForPaths(minSeqIdForTheRegion, rootFS, filesUnderRootDir, reporter,
-              defaultRegionDir));
-    }
-    NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(walFS, regionDir);
-    seqid = Math.max(seqid, replayRecoveredEditsForPaths(minSeqIdForTheRegion, walFS,
-        files, reporter, regionDir));
-
-    if (seqid > minSeqIdForTheRegion) {
+    NavigableSet<Path> filesUnderRootDir = Sets.newTreeSet();
+    if (!regionWALDir.equals(regionDir)) {
+      filesUnderRootDir = WALSplitter.getSplitEditFilesSorted(rootFS, regionDir);
+      seqId = Math.max(seqId, replayRecoveredEditsForPaths(minSeqIdForTheRegion, rootFS,
+        filesUnderRootDir, reporter, regionDir));
+    }
+    NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(walFS, regionWALDir);
+    seqId = Math.max(seqId, replayRecoveredEditsForPaths(minSeqIdForTheRegion, walFS,
+        files, reporter, regionWALDir));
+    if (seqId > minSeqIdForTheRegion) {
       // Then we added some edits to memory. Flush and cleanup split edit files.
-      internalFlushcache(null, seqid, stores.values(), status, false);
+      internalFlushcache(null, seqId, stores.values(), status, false);
     }
-    // Now delete the content of recovered edits.  We're done w/ them.
-    if (files.size() > 0 && this.conf.getBoolean("hbase.region.archive.recovered.edits", false)) {
+    // Now delete the content of recovered edits. We're done w/ them.
+    if (conf.getBoolean("hbase.region.archive.recovered.edits", false)) {
       // For debugging data loss issues!
       // If this flag is set, make use of the hfile archiving by making recovered.edits a fake
       // column family. Have to fake out file type too by casting our recovered.edits as storefiles
-      String fakeFamilyName = WALSplitter.getRegionDirRecoveredEditsDir(regionDir).getName();
-      Set<StoreFile> fakeStoreFiles = new HashSet<>(files.size());
-      for (Path file: files) {
-        fakeStoreFiles.add(
-            new StoreFile(walFS, file, this.conf, null, null));
+      String fakeFamilyName = WALSplitter.getRegionDirRecoveredEditsDir(regionWALDir).getName();
+      Set<StoreFile> fakeStoreFiles = new HashSet<>();
+      for (Path file: Iterables.concat(files, filesUnderWrongRegionWALDir)) {
+        fakeStoreFiles.add(new StoreFile(walFS, file, conf, null, null));
+      }
+      for (Path file: filesUnderRootDir) {
+        fakeStoreFiles.add(new StoreFile(rootFS, file, conf, null, null));
       }
       getRegionWALFileSystem().removeStoreFiles(fakeFamilyName, fakeStoreFiles);
     } else {
-      if (filesUnderRootDir != null) {
-        for (Path file : filesUnderRootDir) {
-          if (!rootFS.delete(file, false)) {
-            LOG.error("Failed delete of {} under root directory." + file);
-          } else {
-            LOG.debug("Deleted recovered.edits root directory file=" + file);
-          }
+      for (Path file : filesUnderRootDir) {
+        if (!rootFS.delete(file, false)) {
+          LOG.error("Failed delete of " + file + " from under the root directory");
+        } else {
+          LOG.debug("Deleted recovered.edits under root directory, file=" + file);
         }
       }
-      for (Path file: files) {
+      for (Path file : Iterables.concat(files, filesUnderWrongRegionWALDir)) {
         if (!walFS.delete(file, false)) {
           LOG.error("Failed delete of " + file);
         } else {
@@ -4331,7 +4337,17 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         }
       }
     }
-    return seqid;
+
+    // We have replayed all the recovered edits. Let's delete the wrong directories introduced
+    // in HBASE-20734, see HBASE-22617 for more details.
+    FileSystem walFs = getWalFileSystem();
+    if (walFs.exists(wrongRegionWALDir)) {
+      if (!walFs.delete(wrongRegionWALDir, true)) {
+        LOG.warn("Unable to delete " + wrongRegionWALDir);
+      }
+    }
+
+    return seqId;
   }
 
   private long replayRecoveredEditsForPaths(long minSeqIdForTheRegion, FileSystem fs,
@@ -7197,34 +7213,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * Computes the Path of the HRegion
-   *
-   * @param tabledir qualified path for table
-   * @param name ENCODED region name
-   * @return Path of HRegion directory
-   * @deprecated For tests only; to be removed.
-   */
-  @Deprecated
-  public static Path getRegionDir(final Path tabledir, final String name) {
-    return new Path(tabledir, name);
-  }
-
-  /**
-   * Computes the Path of the HRegion
-   *
-   * @param rootdir qualified path of HBase root directory
-   * @param info HRegionInfo for the region
-   * @return qualified path of region directory
-   * @deprecated For tests only; to be removed.
-   */
-  @Deprecated
-  @VisibleForTesting
-  public static Path getRegionDir(final Path rootdir, final HRegionInfo info) {
-    return new Path(
-      FSUtils.getTableDir(rootdir, info.getTable()), info.getEncodedName());
-  }
-
-  /**
    * Determines if the specified row is within the row range specified by the
    * specified HRegionInfo
    *
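
Condensing the HRegion change above: the recovered-edits replay now draws from up to three locations before flushing and cleanup. The sketch below is illustrative only; replayAllUnder() is a hypothetical stand-in for WALSplitter.getSplitEditFilesSorted plus replayRecoveredEditsForPaths, and flushing, reporters and error handling are omitted:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative outline of the replay order introduced above; not part of the patch.
    final class ReplayOrderSketch {
      long replayInOrder(FileSystem walFS, FileSystem rootFS, Path regionDir, Path regionWALDir,
          Path wrongRegionWALDir, long minSeqIdForTheRegion) throws IOException {
        long seqId = minSeqIdForTheRegion;
        // 1. edits written to the wrong WAL region dir by HBASE-20734
        seqId = Math.max(seqId, replayAllUnder(walFS, wrongRegionWALDir));
        // 2. edits left under the root dir (HBASE-20723 compatibility), only when that
        //    directory differs from the WAL region dir
        if (!regionWALDir.equals(regionDir)) {
          seqId = Math.max(seqId, replayAllUnder(rootFS, regionDir));
        }
        // 3. the proper WAL region dir
        seqId = Math.max(seqId, replayAllUnder(walFS, regionWALDir));
        // The real method then flushes if seqId advanced, archives or deletes the
        // recovered.edits files from all three sets, and finally deletes wrongRegionWALDir.
        return seqId;
      }

      private long replayAllUnder(FileSystem fs, Path dir) throws IOException {
        return -1L; // stand-in only
      }
    }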
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index bc3b039..2d827ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -610,19 +610,26 @@ public class HRegionFileSystem {
   /**
    * Create the region splits directory.
    */
-  void createSplitsDir() throws IOException {
+  void createSplitsDir(HRegionInfo daughterA, HRegionInfo daughterB) throws IOException {
     Path splitdir = getSplitsDir();
     if (fs.exists(splitdir)) {
       LOG.info("The " + splitdir + " directory exists.  Hence deleting it to recreate it");
       if (!deleteDir(splitdir)) {
-        throw new IOException("Failed deletion of " + splitdir
-            + " before creating them again.");
+        throw new IOException("Failed deletion of " + splitdir + " before creating them again.");
       }
     }
     // splitDir doesn't exists now. No need to do an exists() call for it.
     if (!createDir(splitdir)) {
       throw new IOException("Failed create of " + splitdir);
     }
+    Path daughterATmpDir = getSplitsDir(daughterA);
+    if (!createDir(daughterATmpDir)) {
+      throw new IOException("Failed create of " + daughterATmpDir);
+    }
+    Path daughterBTmpDir = getSplitsDir(daughterB);
+    if (!createDir(daughterBTmpDir)) {
+      throw new IOException("Failed create of " + daughterBTmpDir);
+    }
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
index f9a5d31..3dee699 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
@@ -384,7 +384,7 @@ public class SplitTransactionImpl implements SplitTransaction {
             hri_b, std);
     }
 
-    this.parent.getRegionFileSystem().createSplitsDir();
+    this.parent.getRegionFileSystem().createSplitsDir(hri_a, hri_b);
 
     transition(SplitTransactionPhase.CREATE_SPLIT_DIR);
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 17dd8c2..f87c447 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -648,8 +648,9 @@ public class RestoreSnapshotHelper {
       if (linkPath != null) {
         in = HFileLink.buildFromHFileLinkPattern(conf, linkPath).open(fs);
       } else {
-        linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(),
-                        regionInfo.getEncodedName()), familyDir.getName()), hfileName);
+        linkPath = new Path(new Path(new Path(snapshotManifest.getSnapshotDir(),
+          regionInfo.getEncodedName()),
+          familyDir.getName()), hfileName);
         in = fs.open(linkPath);
       }
       OutputStream out = fs.create(outPath);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index cb3a46e..76c7bd4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -90,7 +90,6 @@ import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
@@ -1094,26 +1093,36 @@ public abstract class FSUtils {
    * @return the region directory used to store WALs under the WALRootDir
    * @throws IOException if there is an exception determining the WALRootDir
    */
-  public static Path getWALRegionDir(final Configuration conf,
-      final HRegionInfo regionInfo)
+  public static Path getWALRegionDir(final Configuration conf, final HRegionInfo regionInfo)
       throws IOException {
     return new Path(getWALTableDir(conf, regionInfo.getTable()),
         regionInfo.getEncodedName());
   }
 
   /**
+   * Returns the WAL region directory based on the region info
+   * @param conf configuration to determine WALRootDir
+   * @param tableName the table name
+   * @param encodedRegionName the encoded region name
+   * @return the region directory used to store WALs under the WALRootDir
+   * @throws IOException if there is an exception determining the WALRootDir
+   */
+  public static Path getWALRegionDir(final Configuration conf, final TableName tableName,
+      final String encodedRegionName) throws IOException {
+    return new Path(getWALTableDir(conf, tableName), encodedRegionName);
+  }
+
+  /**
    * Checks if meta region exists
    *
    * @param fs file system
-   * @param rootdir root directory of HBase installation
+   * @param rootDir root directory of HBase installation
    * @return true if exists
    * @throws IOException e
    */
   @SuppressWarnings("deprecation")
-  public static boolean metaRegionExists(FileSystem fs, Path rootdir)
-  throws IOException {
-    Path metaRegionDir =
-      HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
+  public static boolean metaRegionExists(FileSystem fs, Path rootDir) throws IOException {
+    Path metaRegionDir = getRegionDirFromRootDir(rootDir, HRegionInfo.FIRST_META_REGIONINFO);
     return fs.exists(metaRegionDir);
   }
 
@@ -1260,8 +1269,22 @@ public abstract class FSUtils {
    */
   public static Path getWALTableDir(final Configuration conf, final TableName tableName)
       throws IOException {
-    return new Path(new Path(getWALRootDir(conf), tableName.getNamespaceAsString()),
-        tableName.getQualifierAsString());
+    Path baseDir = new Path(getWALRootDir(conf), HConstants.BASE_NAMESPACE_DIR);
+    return new Path(new Path(baseDir, tableName.getNamespaceAsString()),
+      tableName.getQualifierAsString());
+  }
+
+  /**
+   * For backward compatibility with HBASE-20734, where we store recovered edits in a wrong
+   * directory without BASE_NAMESPACE_DIR. See HBASE-22617 for more details.
+   * @deprecated For compatibility, will be removed in 4.0.0.
+   */
+  @Deprecated
+  public static Path getWrongWALRegionDir(final Configuration conf, final TableName tableName,
+      final String encodedRegionName) throws IOException {
+    Path wrongTableDir = new Path(new Path(getWALRootDir(conf), tableName.getNamespaceAsString()),
+      tableName.getQualifierAsString());
+    return new Path(wrongTableDir, encodedRegionName);
   }
 
   /**
@@ -1507,6 +1530,14 @@ public abstract class FSUtils {
     }
   }
 
+  public static Path getRegionDirFromRootDir(Path rootDir, HRegionInfo region) {
+    return getRegionDirFromTableDir(getTableDir(rootDir, region.getTable()), region);
+  }
+
+  public static Path getRegionDirFromTableDir(Path tableDir, HRegionInfo region) {
+    return new Path(tableDir, ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName());
+  }
+
   /**
    * Given a particular table dir, return all the regiondirs inside it, excluding files such as
    * .tableinfo
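
With the deprecated HRegion.getRegionDir overloads removed (see the HRegion diff above), callers resolve region directories through FSUtils. A short, assumed usage sketch of the helpers added or touched in this file; the class below is not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.FSUtils;
    import org.apache.hadoop.hbase.wal.WALSplitter;

    // Illustrative only.
    final class RegionDirLookups {
      static void show(Configuration conf, HRegionInfo regionInfo) throws IOException {
        // Region dir under hbase.rootdir; replaces HRegion.getRegionDir(rootDir, info).
        Path regionDir = FSUtils.getRegionDirFromRootDir(FSUtils.getRootDir(conf), regionInfo);
        // Region dir under the WAL root, via the new overload taking table and encoded name.
        Path walRegionDir = FSUtils.getWALRegionDir(conf, regionInfo.getTable(),
            regionInfo.getEncodedName());
        // Recovered-edits dir for that region, as now resolved by WALSplitter (diff below).
        Path editsDir = WALSplitter.getRegionDirRecoveredEditsDir(walRegionDir);
        System.out.println(regionDir + " " + walRegionDir + " " + editsDir);
      }
    }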
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
index 937e9b2..7a50781 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 
 /**
@@ -78,15 +77,12 @@ public class HFileArchiveUtil {
    * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
    *         should not be archived
    */
-  public static Path getRegionArchiveDir(Path rootDir,
-                                         TableName tableName,
-                                         Path regiondir) {
+  public static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir) {
     // get the archive directory for a table
     Path archiveDir = getTableArchivePath(rootDir, tableName);
-
     // then add on the region path under the archive
     String encodedRegionName = regiondir.getName();
-    return HRegion.getRegionDir(archiveDir, encodedRegionName);
+    return new Path(archiveDir, encodedRegionName);
   }
 
   /**
@@ -94,14 +90,15 @@ public class HFileArchiveUtil {
    * @param rootDir {@link Path} to the root directory where hbase files are stored (for building
    *          the archive path)
    * @param tableName name of the table to archive. Cannot be null.
+   * @param encodedRegionName encoded region name
    * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
    *         should not be archived
    */
-  public static Path getRegionArchiveDir(Path rootDir,
-                                         TableName tableName, String encodedRegionName) {
+  public static Path getRegionArchiveDir(Path rootDir, TableName tableName,
+      String encodedRegionName) {
     // get the archive directory for a table
     Path archiveDir = getTableArchivePath(rootDir, tableName);
-    return HRegion.getRegionDir(archiveDir, encodedRegionName);
+    return new Path(archiveDir, encodedRegionName);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 50562f5..2105e77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -550,9 +550,9 @@ public class WALSplitter {
   static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
       String tmpDirName, Configuration conf) throws IOException {
     FileSystem walFS = FSUtils.getWALFileSystem(conf);
-    Path tableDir = FSUtils.getWALTableDir(conf, logEntry.getKey().getTablename());
+    TableName tableName = logEntry.getKey().getTablename();
     String encodedRegionName = Bytes.toString(logEntry.getKey().getEncodedRegionName());
-    Path regionDir = HRegion.getRegionDir(tableDir, encodedRegionName);
+    Path regionDir = FSUtils.getWALRegionDir(conf, tableName, encodedRegionName);
     Path dir = getRegionDirRecoveredEditsDir(regionDir);
 
     if (walFS.exists(dir) && walFS.isFile(dir)) {
@@ -560,8 +560,7 @@ public class WALSplitter {
       if (!walFS.exists(tmp)) {
         walFS.mkdirs(tmp);
       }
-      tmp = new Path(tmp,
-        HConstants.RECOVERED_EDITS_DIR + "_" + encodedRegionName);
+      tmp = new Path(tmp, HConstants.RECOVERED_EDITS_DIR + "_" + encodedRegionName);
       LOG.warn("Found existing old file: " + dir + ". It could be some "
         + "leftover of an old installation. It should be a folder instead. "
         + "So moving it to " + tmp);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index ec89974..e904711 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -136,7 +136,7 @@ public class TestHFileArchiving {
 
     // now attempt to depose the region
     Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
-    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+    Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, region.getRegionInfo());
 
     HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
 
@@ -188,7 +188,7 @@ public class TestHFileArchiving {
 
     // make sure there are some files in the regiondir
     Path rootDir = FSUtils.getRootDir(fs.getConf());
-    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+    Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, region.getRegionInfo());
     FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
     Assert.assertNotNull("No files in the region directory", regionFiles);
     if (LOG.isDebugEnabled()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
index e4d09c3..e494bd8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.io;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.Assert;
@@ -31,7 +30,6 @@ import org.junit.experimental.categories.Category;
 import java.util.regex.Matcher;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -95,7 +93,7 @@ public class TestHFileLink {
 
     for(TableName refTable : refTables) {
       Path refTableDir = FSUtils.getTableDir(archiveDir, refTable);
-      Path refRegionDir = HRegion.getRegionDir(refTableDir, encodedRegion);
+      Path refRegionDir = new Path(refTableDir, encodedRegion);
       Path refDir = new Path(refRegionDir, cf);
       Path refLinkDir = new Path(refDir, linkDir);
       String refStoreFileName = refTable.getNameAsString().replace(
@@ -107,7 +105,7 @@ public class TestHFileLink {
 
       for( TableName tableName : tableNames) {
         Path tableDir = FSUtils.getTableDir(rootDir, tableName);
-        Path regionDir = HRegion.getRegionDir(tableDir, encodedRegion);
+        Path regionDir = new Path(tableDir, encodedRegion);
         Path cfDir = new Path(regionDir, cf);
 
         //Verify back reference creation
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 0f712e7..e42aebe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -88,7 +88,6 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -263,7 +262,7 @@ public class TestDistributedLogSplitting {
         Path tdir = FSUtils.getTableDir(rootdir, table);
         Path editsdir =
             WALSplitter.getRegionDirRecoveredEditsDir(
-                HRegion.getRegionDir(tdir, hri.getEncodedName()));
+                FSUtils.getRegionDirFromTableDir(tdir, hri));
         LOG.debug("checking edits dir " + editsdir);
         FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
           @Override
@@ -873,7 +872,7 @@ public class TestDistributedLogSplitting {
       for (HRegionInfo hri : regions) {
         Path editsdir =
             WALSplitter.getRegionDirRecoveredEditsDir(
-                HRegion.getRegionDir(tdir, hri.getEncodedName()));
+              FSUtils.getRegionDirFromTableDir(tdir, hri));
         LOG.debug("checking edits dir " + editsdir);
         if(!fs.exists(editsdir)) continue;
         FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
@@ -903,7 +902,7 @@ public class TestDistributedLogSplitting {
       for (HRegionInfo hri : regions) {
         Path editsdir =
             WALSplitter.getRegionDirRecoveredEditsDir(
-                HRegion.getRegionDir(tdir, hri.getEncodedName()));
+              FSUtils.getRegionDirFromTableDir(tdir, hri));
         fs.delete(editsdir, true);
       }
       disablingHT.close();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
index e5f842b..437f1fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
@@ -319,7 +319,7 @@ public class TestRegionMergeTransaction {
     assertEquals(rowCountOfRegionB, rowCountOfRegionB2);
 
     // Assert rollback cleaned up stuff in fs
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir,
+    assertTrue(!this.fs.exists(FSUtils.getRegionDirFromRootDir(this.testdir,
         mt.getMergedRegionInfo())));
 
     assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index f13dd61..4c1335c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
@@ -346,8 +346,10 @@ public class TestSplitTransaction {
     assertEquals(parentRowCount, parentRowCount2);
 
     // Assert rollback cleaned up stuff in fs
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, st.getFirstDaughter())));
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, st.getSecondDaughter())));
+    assertTrue(!this.fs.exists(FSUtils.getRegionDirFromRootDir(this.testdir,
+      st.getFirstDaughter())));
+    assertTrue(!this.fs.exists(FSUtils.getRegionDirFromRootDir(this.testdir,
+      st.getSecondDaughter())));
     assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
 
     // Now retry the split but do not throw an exception this time.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 49f8534..954aa8b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -66,7 +66,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.FaultySequenceFileLogReader;
 import org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter;
 import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
@@ -1235,7 +1234,7 @@ public class TestWALSplit {
       throws IOException {
     Path tdir = FSUtils.getWALTableDir(conf, table);
     @SuppressWarnings("deprecation")
-    Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir,
+    Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(new Path(tdir,
         Bytes.toString(region.getBytes())));
     FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
       @Override


[hbase] 02/02: HBASE-22629 Remove TestReplicationDroppedTables from branch-1

Posted by ap...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit e94908c1529978fb88fc8b531e0033e6c32bc54b
Author: Andrew Purtell <ap...@apache.org>
AuthorDate: Tue Jun 25 18:25:22 2019 -0700

    HBASE-22629 Remove TestReplicationDroppedTables from branch-1
    
    Signed-off-by: Xu Cang <xc...@apache.org>
---
 .../replication/TestReplicationDroppedTables.java  | 292 ---------------------
 1 file changed, 292 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java
deleted file mode 100644
index 6c00047..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.fail;
-
-import java.util.ArrayList;
-import java.util.List;
-
-@Category(LargeTests.class)
-public class TestReplicationDroppedTables extends TestReplicationBase {
-  private static final Log LOG = LogFactory.getLog(TestReplicationDroppedTables.class);
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @Before
-  public void setUp() throws Exception {
-    // Starting and stopping replication can make us miss new logs,
-    // rolling like this makes sure the most recent one gets added to the queue
-    for ( JVMClusterUtil.RegionServerThread r :
-        utility1.getHBaseCluster().getRegionServerThreads()) {
-      utility1.getHBaseAdmin().rollWALWriter(r.getRegionServer().getServerName());
-    }
-    int rowCount = utility1.countRows(tableName);
-    utility1.deleteTableData(tableName);
-    // truncating the table will send one Delete per row to the slave cluster
-    // in an async fashion, which is why we cannot just call deleteTableData on
-    // utility2 since late writes could make it to the slave in some way.
-    // Instead, we truncate the first table and wait for all the Deletes to
-    // make it to the slave.
-    Scan scan = new Scan();
-    int lastCount = 0;
-    for (int i = 0; i < NB_RETRIES; i++) {
-      if (i==NB_RETRIES-1) {
-        fail("Waited too much time for truncate");
-      }
-      ResultScanner scanner = htable2.getScanner(scan);
-      Result[] res = scanner.next(rowCount);
-      scanner.close();
-      if (res.length != 0) {
-        if (res.length < lastCount) {
-          i--; // Don't increment timeout if we make progress
-        }
-        lastCount = res.length;
-        LOG.info("Still got " + res.length + " rows");
-        Thread.sleep(SLEEP_TIME);
-      } else {
-        break;
-      }
-    }
-  }
-
-  @Test(timeout = 600000)
-  public void testEditsStuckBehindDroppedTable() throws Exception {
-    // Sanity check
-    // Make sure by default edits for dropped tables stall the replication queue, even when the
-    // table(s) in question have been deleted on both ends.
-    testEditsBehindDroppedTable(false, "test_dropped");
-  }
-
-  @Test(timeout = 600000)
-  public void testEditsDroppedWithDroppedTable() throws Exception {
-    // Make sure by default edits for dropped tables are themselves dropped when the
-    // table(s) in question have been deleted on both ends.
-    testEditsBehindDroppedTable(true, "test_dropped");
-  }
-
-  @Test(timeout = 600000)
-  public void testEditsDroppedWithDroppedTableNS() throws Exception {
-    // also try with a namespace
-    Connection connection1 = ConnectionFactory.createConnection(conf1);
-    try (Admin admin1 = connection1.getAdmin()) {
-      admin1.createNamespace(NamespaceDescriptor.create("NS").build());
-    }
-    Connection connection2 = ConnectionFactory.createConnection(conf2);
-    try (Admin admin2 = connection2.getAdmin()) {
-      admin2.createNamespace(NamespaceDescriptor.create("NS").build());
-    }
-    testEditsBehindDroppedTable(true, "NS:test_dropped");
-  }
-
-  private void testEditsBehindDroppedTable(boolean allowProceeding, String tName) throws Exception {
-    conf1.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, allowProceeding);
-    conf1.setInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, 1);
-
-    // make sure we have a single region server only, so that all
-    // edits for all tables go there
-    utility1.shutdownMiniHBaseCluster();
-    utility1.startMiniHBaseCluster(1, 1);
-
-    TableName tablename = TableName.valueOf(tName);
-    byte[] familyname = Bytes.toBytes("fam");
-    byte[] row = Bytes.toBytes("row");
-
-    HTableDescriptor table = new HTableDescriptor(tablename);
-    HColumnDescriptor fam = new HColumnDescriptor(familyname);
-    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
-    table.addFamily(fam);
-
-    Connection connection1 = ConnectionFactory.createConnection(conf1);
-    Connection connection2 = ConnectionFactory.createConnection(conf2);
-    try (Admin admin1 = connection1.getAdmin()) {
-      admin1.createTable(table);
-    }
-    try (Admin admin2 = connection2.getAdmin()) {
-      admin2.createTable(table);
-    }
-    utility1.waitUntilAllRegionsAssigned(tablename);
-    utility2.waitUntilAllRegionsAssigned(tablename);
-
-    Table lHtable1 = utility1.getConnection().getTable(tablename);
-
-    // now suspend replication
-    admin.disablePeer(PEER_ID);
-
-    // put some data (lead with 0 so the edit gets sorted before the other table's edits
-    //   in the replication batch)
-    // write a bunch of edits, making sure we fill a batch
-    byte[] rowkey = Bytes.toBytes(0+" put on table to be dropped");
-    Put put = new Put(rowkey);
-    put.addColumn(familyname, row, row);
-    lHtable1.put(put);
-
-    rowkey = Bytes.toBytes("normal put");
-    put = new Put(rowkey);
-    put.addColumn(famName, row, row);
-    htable1.put(put);
-
-    try (Admin admin1 = connection1.getAdmin()) {
-      admin1.disableTable(tablename);
-      admin1.deleteTable(tablename);
-    }
-    try (Admin admin2 = connection2.getAdmin()) {
-      admin2.disableTable(tablename);
-      admin2.deleteTable(tablename);
-    }
-
-    admin.enablePeer(PEER_ID);
-    if (allowProceeding) {
-      // in this we'd expect the key to make it over
-      verifyReplicationProceeded(rowkey);
-    } else {
-      verifyReplicationStuck(rowkey);
-    }
-    // just to be safe
-    conf1.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
-  }
-
-  @Test(timeout = 600000)
-  public void testEditsBehindDroppedTableTiming() throws Exception {
-    conf1.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, true);
-    conf1.setInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, 1);
-
-    // make sure we have a single region server only, so that all
-    // edits for all tables go there
-    utility1.shutdownMiniHBaseCluster();
-    utility1.startMiniHBaseCluster(1, 1);
-
-    TableName tablename = TableName.valueOf("testdroppedtimed");
-    byte[] familyname = Bytes.toBytes("fam");
-    byte[] row = Bytes.toBytes("row");
-
-    HTableDescriptor table = new HTableDescriptor(tablename);
-    HColumnDescriptor fam = new HColumnDescriptor(familyname);
-    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
-    table.addFamily(fam);
-
-    Connection connection1 = ConnectionFactory.createConnection(conf1);
-    Connection connection2 = ConnectionFactory.createConnection(conf2);
-    try (Admin admin1 = connection1.getAdmin()) {
-      admin1.createTable(table);
-    }
-    try (Admin admin2 = connection2.getAdmin()) {
-      admin2.createTable(table);
-    }
-    utility1.waitUntilAllRegionsAssigned(tablename);
-    utility2.waitUntilAllRegionsAssigned(tablename);
-
-    Table lHtable1 = utility1.getConnection().getTable(tablename);
-
-    // now suspend replication
-    admin.disablePeer(PEER_ID);
-
-    // put some data (lead with 0 so the edit gets sorted before the other table's edits
-    //   in the replication batch)
-    // write a bunch of edits, making sure we fill a batch
-    byte[] rowkey = Bytes.toBytes(0+" put on table to be dropped");
-    Put put = new Put(rowkey);
-    put.addColumn(familyname, row, row);
-    lHtable1.put(put);
-
-    rowkey = Bytes.toBytes("normal put");
-    put = new Put(rowkey);
-    put.addColumn(famName, row, row);
-    htable1.put(put);
-
-    try (Admin admin2 = connection2.getAdmin()) {
-      admin2.disableTable(tablename);
-      admin2.deleteTable(tablename);
-    }
-
-    admin.enablePeer(PEER_ID);
-    // edit should still be stuck
-
-    try (Admin admin1 = connection1.getAdmin()) {
-      // the source table still exists, replication should be stalled
-      verifyReplicationStuck(rowkey);
-
-      admin1.disableTable(tablename);
-      // still stuck, source table still exists
-      verifyReplicationStuck(rowkey);
-
-      admin1.deleteTable(tablename);
-      // now the source table is gone, replication should proceed, the
-      // offending edits be dropped
-      verifyReplicationProceeded(rowkey);
-    }
-    // just to be safe
-    conf1.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
-  }
-
-  private void verifyReplicationProceeded(byte[] rowkey) throws Exception {
-    Get get = new Get(rowkey);
-    for (int i = 0; i < NB_RETRIES; i++) {
-      if (i==NB_RETRIES-1) {
-        fail("Waited too much time for put replication");
-      }
-      Result res = htable2.get(get);
-      if (res.size() == 0) {
-        LOG.info("Row not available");
-        Thread.sleep(SLEEP_TIME);
-      } else {
-        assertArrayEquals(res.getRow(), rowkey);
-        break;
-      }
-    }
-  }
-
-  private void verifyReplicationStuck(byte[] rowkey) throws Exception {
-    Get get = new Get(rowkey);
-    for (int i = 0; i < NB_RETRIES; i++) {
-      Result res = htable2.get(get);
-      if (res.size() >= 1) {
-        fail("Edit should have been stuck behind dropped tables");
-      } else {
-        LOG.info("Row not replicated, let's wait a bit more...");
-        Thread.sleep(SLEEP_TIME);
-      }
-    }
-  }
-}
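
For reference, the behaviour the removed test exercised is controlled by HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, which the test showed defaults to off (edits for a dropped table stall replication rather than being dropped). A minimal snippet, shown only as a pointer to the coverage that went away; the class is illustrative, not from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    // Illustrative only; mirrors what the deleted test set on the source cluster's conf.
    final class DropOnDeletedTableFlag {
      static Configuration enable() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean(HConstants.REPLICATION_DROP_ON_DELETED_TABLE_KEY, true);
        return conf;
      }
    }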