Posted to commits@hbase.apache.org by zh...@apache.org on 2019/06/25 13:20:44 UTC
[hbase] branch branch-2.0 updated: HBASE-22617 Recovered WAL directories not getting cleaned up (#330)
This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.0 by this push:
new 90a7930 HBASE-22617 Recovered WAL directories not getting cleaned up (#330)
90a7930 is described below
commit 90a79306252a53012dcdf3a08589588e3dfcb14b
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Jun 25 16:21:24 2019 +0800
HBASE-22617 Recovered WAL directories not getting cleaned up (#330)
Signed-off-by: Guanghao Zhang <zg...@apache.org>
Signed-off-by: Andrew Purtell <ap...@apache.org>
---
.../apache/hadoop/hbase/util/CommonFSUtils.java | 28 +++--
.../apache/hadoop/hbase/backup/HFileArchiver.java | 9 +-
.../hadoop/hbase/master/MasterFileSystem.java | 9 +-
.../hbase/master/assignment/GCRegionProcedure.java | 87 +++++++++------
.../assignment/MergeTableRegionsProcedure.java | 10 +-
.../assignment/SplitTableRegionProcedure.java | 17 ++-
.../master/procedure/DeleteTableProcedure.java | 14 ++-
.../master/procedure/DisableTableProcedure.java | 8 +-
.../hbase/master/procedure/MasterProcedureEnv.java | 5 +
.../apache/hadoop/hbase/regionserver/HRegion.java | 117 +++++++++------------
.../hbase/regionserver/HRegionFileSystem.java | 19 ++--
.../java/org/apache/hadoop/hbase/util/FSUtils.java | 44 ++++----
.../org/apache/hadoop/hbase/wal/WALSplitter.java | 53 ++++++++--
.../hadoop/hbase/backup/TestHFileArchiving.java | 4 +-
.../master/procedure/TestCreateTableProcedure.java | 2 +-
.../procedure/TestTruncateTableProcedure.java | 2 +-
.../hbase/regionserver/TestRecoveredEdits.java | 2 +-
17 files changed, 253 insertions(+), 177 deletions(-)
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index bf0d792..e1e3e76 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -28,7 +28,6 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
-
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -425,11 +424,9 @@ public abstract class CommonFSUtils {
* @return the region directory used to store WALs under the WALRootDir
* @throws IOException if there is an exception determining the WALRootDir
*/
- public static Path getWALRegionDir(final Configuration conf,
- final TableName tableName, final String encodedRegionName)
- throws IOException {
- return new Path(getWALTableDir(conf, tableName),
- encodedRegionName);
+ public static Path getWALRegionDir(final Configuration conf, final TableName tableName,
+ final String encodedRegionName) throws IOException {
+ return new Path(getWALTableDir(conf, tableName), encodedRegionName);
}
/**
@@ -441,8 +438,22 @@ public abstract class CommonFSUtils {
*/
public static Path getWALTableDir(final Configuration conf, final TableName tableName)
throws IOException {
- return new Path(new Path(getWALRootDir(conf), tableName.getNamespaceAsString()),
- tableName.getQualifierAsString());
+ Path baseDir = new Path(getWALRootDir(conf), HConstants.BASE_NAMESPACE_DIR);
+ return new Path(new Path(baseDir, tableName.getNamespaceAsString()),
+ tableName.getQualifierAsString());
+ }
+
+ /**
+ * For backward compatibility with HBASE-20734, where we stored recovered edits in the wrong
+ * directory without BASE_NAMESPACE_DIR. See HBASE-22617 for more details.
+ * @deprecated For compatibility, will be removed in 4.0.0.
+ */
+ @Deprecated
+ public static Path getWrongWALRegionDir(final Configuration conf, final TableName tableName,
+ final String encodedRegionName) throws IOException {
+ Path wrongTableDir = new Path(new Path(getWALRootDir(conf), tableName.getNamespaceAsString()),
+ tableName.getQualifierAsString());
+ return new Path(wrongTableDir, encodedRegionName);
}
/**
@@ -1055,5 +1066,4 @@ public abstract class CommonFSUtils {
super(message);
}
}
-
}
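The net effect of the CommonFSUtils changes above: getWALTableDir now inserts HConstants.BASE_NAMESPACE_DIR ("data") under the WAL root, matching the layout used under the root dir, while getWrongWALRegionDir reproduces the old, incorrect layout so that cleanup code can still find directories written by HBASE-20734. A minimal sketch of the two layouts, assuming hypothetical values (WAL root hdfs://nn:8020/hbase, namespace "ns", table "tbl", encoded region name "abc123"):

    // Correct layout, includes BASE_NAMESPACE_DIR ("data"):
    //   hdfs://nn:8020/hbase/data/ns/tbl/abc123
    Path regionWALDir = FSUtils.getWALRegionDir(conf, tableName, "abc123");
    // Wrong layout from HBASE-20734, namespace directly under the WAL root:
    //   hdfs://nn:8020/hbase/ns/tbl/abc123
    Path wrongRegionWALDir = FSUtils.getWrongWALRegionDir(conf, tableName, "abc123");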
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index f5496ea..081da14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -77,23 +76,21 @@ public class HFileArchiver {
public static boolean exists(Configuration conf, FileSystem fs, RegionInfo info)
throws IOException {
Path rootDir = FSUtils.getRootDir(conf);
- Path regionDir = HRegion.getRegionDir(rootDir, info);
+ Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, info);
return fs.exists(regionDir);
}
/**
- * Cleans up all the files for a HRegion by archiving the HFiles to the
- * archive directory
+ * Cleans up all the files for a HRegion by archiving the HFiles to the archive directory
* @param conf the configuration to use
* @param fs the file system object
* @param info RegionInfo for region to be deleted
- * @throws IOException
*/
public static void archiveRegion(Configuration conf, FileSystem fs, RegionInfo info)
throws IOException {
Path rootDir = FSUtils.getRootDir(conf);
archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()),
- HRegion.getRegionDir(rootDir, info));
+ FSUtils.getRegionDirFromRootDir(rootDir, info));
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 5cb4ff4..9921fcd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.master;
import java.io.IOException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -192,7 +191,9 @@ public class MasterFileSystem {
return this.fs;
}
- protected FileSystem getWALFileSystem() { return this.walFs; }
+ public FileSystem getWALFileSystem() {
+ return this.walFs;
+ }
public Configuration getConfiguration() {
return this.conf;
@@ -210,7 +211,9 @@ public class MasterFileSystem {
* different if hfiles on one fs and WALs on another. The 'WALs' dir gets made underneath
* the root dir returned here; i.e. this is '/hbase', not '/hbase/WALs'.
*/
- public Path getWALRootDir() { return this.walRootDir; }
+ public Path getWALRootDir() {
+ return this.walRootDir;
+ }
/**
* @return HBase temp dir.
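As the getWALRootDir javadoc above notes, hfiles and WALs may live on different filesystems. A hedged sketch of such a setup (the endpoints are hypothetical; the config keys are HConstants.HBASE_DIR, i.e. "hbase.rootdir", and CommonFSUtils.HBASE_WAL_DIR, i.e. "hbase.wal.dir"):

    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.HBASE_DIR, "s3a://bucket/hbase");           // hfiles
    conf.set(CommonFSUtils.HBASE_WAL_DIR, "hdfs://nn:8020/hbase");  // WALs
    Path rootDir = FSUtils.getRootDir(conf);       // s3a://bucket/hbase
    Path walRootDir = FSUtils.getWALRootDir(conf); // hdfs://nn:8020/hbase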
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
index 0b6e45b..efd9bea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
@@ -18,22 +18,26 @@
package org.apache.hadoop.hbase.master.assignment;
import java.io.IOException;
-
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCRegionState;
@@ -64,46 +68,65 @@ public class GCRegionProcedure extends AbstractStateMachineRegionProcedure<GCReg
@Override
protected Flow executeFromState(MasterProcedureEnv env, GCRegionState state)
- throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
+ throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(this + " execute state=" + state);
}
MasterServices masterServices = env.getMasterServices();
try {
switch (state) {
- case GC_REGION_PREPARE:
- // Nothing to do to prepare.
- setNextState(GCRegionState.GC_REGION_ARCHIVE);
- break;
- case GC_REGION_ARCHIVE:
- FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
- if (HFileArchiver.exists(masterServices.getConfiguration(), fs, getRegion())) {
- if (LOG.isDebugEnabled()) LOG.debug("Archiving region=" + getRegion().getShortNameToLog());
- HFileArchiver.archiveRegion(masterServices.getConfiguration(), fs, getRegion());
- }
- setNextState(GCRegionState.GC_REGION_PURGE_METADATA);
- break;
- case GC_REGION_PURGE_METADATA:
- // TODO: Purge metadata before removing from HDFS? This ordering is copied
- // from CatalogJanitor.
- AssignmentManager am = masterServices.getAssignmentManager();
- if (am != null) {
- if (am.getRegionStates() != null) {
- am.getRegionStates().deleteRegion(getRegion());
+ case GC_REGION_PREPARE:
+ // Nothing to do to prepare.
+ setNextState(GCRegionState.GC_REGION_ARCHIVE);
+ break;
+ case GC_REGION_ARCHIVE:
+ MasterFileSystem mfs = masterServices.getMasterFileSystem();
+ FileSystem fs = mfs.getFileSystem();
+ if (HFileArchiver.exists(masterServices.getConfiguration(), fs, getRegion())) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Archiving region=" + getRegion().getShortNameToLog());
+ }
+ HFileArchiver.archiveRegion(masterServices.getConfiguration(), fs, getRegion());
+ }
+ FileSystem walFs = mfs.getWALFileSystem();
+ // Clean up the directories on the WAL filesystem as well
+ Path regionWALDir = FSUtils.getWALRegionDir(env.getMasterConfiguration(),
+ getRegion().getTable(), getRegion().getEncodedName());
+ if (walFs.exists(regionWALDir)) {
+ if (!walFs.delete(regionWALDir, true)) {
+ LOG.debug("Failed to delete {}", regionWALDir);
+ }
+ }
+ Path wrongRegionWALDir = FSUtils.getWrongWALRegionDir(env.getMasterConfiguration(),
+ getRegion().getTable(), getRegion().getEncodedName());
+ if (walFs.exists(wrongRegionWALDir)) {
+ if (!walFs.delete(wrongRegionWALDir, true)) {
+ LOG.debug("Failed to delete {}", regionWALDir);
+ }
+ }
+ setNextState(GCRegionState.GC_REGION_PURGE_METADATA);
+ break;
+ case GC_REGION_PURGE_METADATA:
+ // TODO: Purge metadata before removing from HDFS? This ordering is copied
+ // from CatalogJanitor.
+ AssignmentManager am = masterServices.getAssignmentManager();
+ if (am != null) {
+ if (am.getRegionStates() != null) {
+ am.getRegionStates().deleteRegion(getRegion());
+ }
+ }
+ MetaTableAccessor.deleteRegion(masterServices.getConnection(), getRegion());
+ masterServices.getServerManager().removeRegion(getRegion());
+ FavoredNodesManager fnm = masterServices.getFavoredNodesManager();
+ if (fnm != null) {
+ fnm.deleteFavoredNodesForRegions(Lists.newArrayList(getRegion()));
}
- }
- MetaTableAccessor.deleteRegion(masterServices.getConnection(), getRegion());
- masterServices.getServerManager().removeRegion(getRegion());
- FavoredNodesManager fnm = masterServices.getFavoredNodesManager();
- if (fnm != null) {
- fnm.deleteFavoredNodesForRegions(Lists.newArrayList(getRegion()));
- }
- return Flow.NO_MORE_STATE;
- default:
- throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
}
} catch (IOException ioe) {
- // TODO: This is going to spew log?
+ // TODO: This is going to spew log? Add retry backoff
LOG.warn("Error trying to GC " + getRegion().getShortNameToLog() + "; retrying...", ioe);
}
return Flow.HAS_MORE_STATE;
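The GC_REGION_ARCHIVE branch now removes both the correct and the wrong WAL region directory with the same delete-if-exists idiom. A hypothetical helper, not part of this commit, that captures the repeated pattern:

    // Sketch only: recursively delete a directory if present, logging on failure.
    private static void deleteDirIfExists(FileSystem fs, Path dir) throws IOException {
      if (fs.exists(dir) && !fs.delete(dir, true)) {
        LOG.debug("Failed to delete {}", dir);
      }
    }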
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index a3ec9cc..4ef63c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -824,16 +824,16 @@ public class MergeTableRegionsProcedure
}
private void writeMaxSequenceIdFile(MasterProcedureEnv env) throws IOException {
- FileSystem walFS = env.getMasterServices().getMasterWalManager().getFileSystem();
+ MasterFileSystem fs = env.getMasterFileSystem();
long maxSequenceId = -1L;
for (RegionInfo region : regionsToMerge) {
maxSequenceId =
- Math.max(maxSequenceId, WALSplitter.getMaxRegionSequenceId(
- walFS, getWALRegionDir(env, region)));
+ Math.max(maxSequenceId, WALSplitter.getMaxRegionSequenceId(env.getMasterConfiguration(),
+ region, fs::getFileSystem, fs::getWALFileSystem));
}
if (maxSequenceId > 0) {
- WALSplitter.writeRegionSequenceIdFile(walFS, getWALRegionDir(env, mergedRegion),
- maxSequenceId);
+ WALSplitter.writeRegionSequenceIdFile(fs.getWALFileSystem(),
+ getWALRegionDir(env, mergedRegion), maxSequenceId);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 493e42e..18cf850 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -33,7 +33,6 @@ import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -598,7 +597,7 @@ public class SplitTableRegionProcedure
final FileSystem fs = mfs.getFileSystem();
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
env.getMasterConfiguration(), fs, tabledir, getParentRegion(), false);
- regionFs.createSplitsDir();
+ regionFs.createSplitsDir(daughter_1_RI, daughter_2_RI);
Pair<Integer, Integer> expectedReferences = splitStoreFiles(env, regionFs);
@@ -883,14 +882,14 @@ public class SplitTableRegionProcedure
}
private void writeMaxSequenceIdFile(MasterProcedureEnv env) throws IOException {
- FileSystem walFS = env.getMasterServices().getMasterWalManager().getFileSystem();
- long maxSequenceId =
- WALSplitter.getMaxRegionSequenceId(walFS, getWALRegionDir(env, getParentRegion()));
+ MasterFileSystem fs = env.getMasterFileSystem();
+ long maxSequenceId = WALSplitter.getMaxRegionSequenceId(env.getMasterConfiguration(),
+ getParentRegion(), fs::getFileSystem, fs::getWALFileSystem);
if (maxSequenceId > 0) {
- WALSplitter.writeRegionSequenceIdFile(walFS, getWALRegionDir(env, daughter_1_RI),
- maxSequenceId);
- WALSplitter.writeRegionSequenceIdFile(walFS, getWALRegionDir(env, daughter_2_RI),
- maxSequenceId);
+ WALSplitter.writeRegionSequenceIdFile(fs.getWALFileSystem(),
+ getWALRegionDir(env, daughter_1_RI), maxSequenceId);
+ WALSplitter.writeRegionSequenceIdFile(fs.getWALFileSystem(),
+ getWALRegionDir(env, daughter_2_RI), maxSequenceId);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 060af01..80c7282 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.procedure;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -43,11 +42,11 @@ import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -311,8 +310,8 @@ public class DeleteTableProcedure
if (archive) {
for (RegionInfo hri : regions) {
LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
- HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
- tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir,
+ FSUtils.getRegionDirFromTableDir(tempTableDir, hri));
}
LOG.debug("Table '" + tableName + "' archived!");
}
@@ -337,6 +336,13 @@ public class DeleteTableProcedure
throw new IOException("Couldn't delete mob dir " + mobTableDir);
}
}
+
+ // Delete the table directory on the WAL filesystem
+ FileSystem walFs = mfs.getWALFileSystem();
+ Path tableWALDir = FSUtils.getWALTableDir(env.getMasterConfiguration(), tableName);
+ if (walFs.exists(tableWALDir) && !walFs.delete(tableWALDir, true)) {
+ throw new IOException("Couldn't delete table dir on wal filesystem" + tableWALDir);
+ }
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index 0578402..7e538ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -19,20 +19,20 @@
package org.apache.hadoop.hbase.master.procedure;
import java.io.IOException;
-
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.TableStateManager;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
index c82a7b5..8fd9537 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
@@ -29,6 +29,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -114,6 +115,10 @@ public class MasterProcedureEnv implements ConfigurationObserver {
return remoteDispatcher;
}
+ public MasterFileSystem getMasterFileSystem() {
+ return master.getMasterFileSystem();
+ }
+
public boolean isRunning() {
if (this.master == null || this.master.getMasterProcedureExecutor() == null) return false;
return master.getMasterProcedureExecutor().isRunning();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 21ca098..b8630bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -183,6 +183,7 @@ import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
@@ -974,16 +975,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// Use maximum of log sequenceid or that which was found in stores
// (particularly if no recovered edits, seqid will be -1).
- long maxSeqIdFromFile =
- WALSplitter.getMaxRegionSequenceId(getWalFileSystem(), getWALRegionDirOfDefaultReplica());
+ // always get openSeqNum from the default replica, even if we are secondary replicas
+ long maxSeqIdFromFile = WALSplitter.getMaxRegionSequenceId(conf,
+ RegionReplicaUtil.getRegionInfoForDefaultReplica(getRegionInfo()), this::getFilesystem,
+ this::getWalFileSystem);
long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
// The openSeqNum will always be increase even for read only region, as we rely on it to
- // determine whether a region has been successfully reopend, so here we always need to update
+ // determine whether a region has been successfully reopened, so here we always need to update
// the max sequence id file.
if (RegionReplicaUtil.isDefaultReplica(getRegionInfo())) {
LOG.debug("writing seq id for {}", this.getRegionInfo().getEncodedName());
WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), getWALRegionDir(), nextSeqId);
- //WALSplitter.writeRegionSequenceIdFile(getWalFileSystem(), getWALRegionDir(), nextSeqId - 1);
+ // This means we have replayed all the recovered edits and also written out the max sequence
+ // id file, let's delete the wrong directories introduced in HBASE-20734, see HBASE-22617
+ // for more details.
+ Path wrongRegionWALDir = FSUtils.getWrongWALRegionDir(conf, getRegionInfo().getTable(),
+ getRegionInfo().getEncodedName());
+ FileSystem walFs = getWalFileSystem();
+ if (walFs.exists(wrongRegionWALDir)) {
+ if (!walFs.delete(wrongRegionWALDir, true)) {
+ LOG.debug("Failed to clean up wrong region WAL directory {}", wrongRegionWALDir);
+ }
+ }
}
LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
@@ -1883,19 +1896,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
return regionDir;
}
- /**
- * @return the Region directory under WALRootDirectory; in case of secondary replica return the
- * region directory corresponding to its default replica
- * @throws IOException if there is an error getting WALRootDir
- */
- private Path getWALRegionDirOfDefaultReplica() throws IOException {
- RegionInfo regionInfo = getRegionInfo();
- if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
- regionInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo);
- }
- return FSUtils.getWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
- }
-
@Override
public long getEarliestFlushTimeForAllStores() {
return Collections.min(lastStoreFlushTimeMap.values());
@@ -4441,63 +4441,65 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
minSeqIdForTheRegion = maxSeqIdInStore;
}
}
- long seqid = minSeqIdForTheRegion;
+ long seqId = minSeqIdForTheRegion;
FileSystem walFS = getWalFileSystem();
FileSystem rootFS = getFilesystem();
- Path regionDir = getWALRegionDir();
- Path defaultRegionDir = getRegionDir(FSUtils.getRootDir(conf), getRegionInfo());
-
+ Path wrongRegionWALDir = FSUtils.getWrongWALRegionDir(conf, getRegionInfo().getTable(),
+ getRegionInfo().getEncodedName());
+ Path regionWALDir = getWALRegionDir();
+ Path regionDir = FSUtils.getRegionDirFromRootDir(FSUtils.getRootDir(conf), getRegionInfo());
+
+ // We made a mistake in HBASE-20734 so we need to do this dirty hack...
+ NavigableSet<Path> filesUnderWrongRegionWALDir =
+ WALSplitter.getSplitEditFilesSorted(walFS, wrongRegionWALDir);
+ seqId = Math.max(seqId, replayRecoveredEditsForPaths(minSeqIdForTheRegion, walFS,
+ filesUnderWrongRegionWALDir, reporter, regionDir));
// This is to ensure backwards compatibility with HBASE-20723 where recovered edits can appear
// under the root dir even if walDir is set.
- NavigableSet<Path> filesUnderRootDir = null;
- if (!regionDir.equals(defaultRegionDir)) {
- filesUnderRootDir =
- WALSplitter.getSplitEditFilesSorted(rootFS, defaultRegionDir);
- seqid = Math.max(seqid,
- replayRecoveredEditsForPaths(minSeqIdForTheRegion, rootFS, filesUnderRootDir, reporter,
- defaultRegionDir));
+ NavigableSet<Path> filesUnderRootDir = Collections.emptyNavigableSet();
+ if (!regionWALDir.equals(regionDir)) {
+ filesUnderRootDir = WALSplitter.getSplitEditFilesSorted(rootFS, regionDir);
+ seqId = Math.max(seqId, replayRecoveredEditsForPaths(minSeqIdForTheRegion, rootFS,
+ filesUnderRootDir, reporter, regionDir));
}
- NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(walFS, regionDir);
- seqid = Math.max(seqid, replayRecoveredEditsForPaths(minSeqIdForTheRegion, walFS,
- files, reporter, regionDir));
+ NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(walFS, regionWALDir);
+ seqId = Math.max(seqId, replayRecoveredEditsForPaths(minSeqIdForTheRegion, walFS,
+ files, reporter, regionWALDir));
- if (seqid > minSeqIdForTheRegion) {
+ if (seqId > minSeqIdForTheRegion) {
// Then we added some edits to memory. Flush and cleanup split edit files.
- internalFlushcache(null, seqid, stores.values(), status, false, FlushLifeCycleTracker.DUMMY);
+ internalFlushcache(null, seqId, stores.values(), status, false, FlushLifeCycleTracker.DUMMY);
}
- // Now delete the content of recovered edits. We're done w/ them.
+ // Now delete the content of recovered edits. We're done w/ them.
if (files.size() > 0 && this.conf.getBoolean("hbase.region.archive.recovered.edits", false)) {
// For debugging data loss issues!
// If this flag is set, make use of the hfile archiving by making recovered.edits a fake
// column family. Have to fake out file type too by casting our recovered.edits as storefiles
- String fakeFamilyName = WALSplitter.getRegionDirRecoveredEditsDir(regionDir).getName();
+ String fakeFamilyName = WALSplitter.getRegionDirRecoveredEditsDir(regionWALDir).getName();
Set<HStoreFile> fakeStoreFiles = new HashSet<>(files.size());
- for (Path file: files) {
- fakeStoreFiles.add(
- new HStoreFile(walFS, file, this.conf, null, null, true));
+ for (Path file : files) {
+ fakeStoreFiles.add(new HStoreFile(walFS, file, this.conf, null, null, true));
}
getRegionWALFileSystem().removeStoreFiles(fakeFamilyName, fakeStoreFiles);
} else {
- if (filesUnderRootDir != null) {
- for (Path file : filesUnderRootDir) {
- if (!rootFS.delete(file, false)) {
- LOG.error("Failed delete of {} from under the root directory.", file);
- } else {
- LOG.debug("Deleted recovered.edits under root directory. file=" + file);
- }
+ for (Path file : Iterables.concat(files, filesUnderWrongRegionWALDir)) {
+ if (!walFS.delete(file, false)) {
+ LOG.error("Failed delete of {}", file);
+ } else {
+ LOG.debug("Deleted recovered.edits file={}", file);
}
}
- for (Path file: files) {
- if (!walFS.delete(file, false)) {
- LOG.error("Failed delete of " + file);
+ for (Path file : filesUnderRootDir) {
+ if (!rootFS.delete(file, false)) {
+ LOG.error("Failed delete of {}", file);
} else {
- LOG.debug("Deleted recovered.edits file=" + file);
+ LOG.debug("Deleted recovered.edits file={}", file);
}
}
}
- return seqid;
+ return seqId;
}
private long replayRecoveredEditsForPaths(long minSeqIdForTheRegion, FileSystem fs,
@@ -7251,21 +7253,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
- * Computes the Path of the HRegion
- *
- * @param rootdir qualified path of HBase root directory
- * @param info RegionInfo for the region
- * @return qualified path of region directory
- * @deprecated For tests only; to be removed.
- */
- @Deprecated
- @VisibleForTesting
- public static Path getRegionDir(final Path rootdir, final RegionInfo info) {
- return new Path(
- FSUtils.getTableDir(rootdir, info.getTable()), info.getEncodedName());
- }
-
- /**
* Determines if the specified row is within the row range specified by the
* specified RegionInfo
*
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 37a4309..1021233 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.regionserver;
+import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
@@ -50,13 +51,12 @@ import org.apache.hadoop.hbase.util.FSHDFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import edu.umd.cs.findbugs.annotations.Nullable;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
/**
* View to an on-disk Region.
@@ -629,19 +629,26 @@ public class HRegionFileSystem {
/**
* Create the region splits directory.
*/
- public void createSplitsDir() throws IOException {
+ public void createSplitsDir(RegionInfo daughterA, RegionInfo daughterB) throws IOException {
Path splitdir = getSplitsDir();
if (fs.exists(splitdir)) {
LOG.info("The " + splitdir + " directory exists. Hence deleting it to recreate it");
if (!deleteDir(splitdir)) {
- throw new IOException("Failed deletion of " + splitdir
- + " before creating them again.");
+ throw new IOException("Failed deletion of " + splitdir + " before creating them again.");
}
}
// splitDir doesn't exist now. No need to do an exists() call for it.
if (!createDir(splitdir)) {
throw new IOException("Failed create of " + splitdir);
}
+ Path daughterATmpDir = getSplitsDir(daughterA);
+ if (!createDir(daughterATmpDir)) {
+ throw new IOException("Failed create of " + daughterATmpDir);
+ }
+ Path daughterBTmpDir = getSplitsDir(daughterB);
+ if (!createDir(daughterBTmpDir)) {
+ throw new IOException("Failed create of " + daughterBTmpDir);
+ }
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index b51b06b..de15cc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -18,14 +18,7 @@
*/
package org.apache.hadoop.hbase.util;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
-import org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
-
import edu.umd.cs.findbugs.annotations.CheckForNull;
-
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
@@ -55,7 +48,6 @@ import java.util.concurrent.FutureTask;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -72,18 +64,14 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.security.AccessDeniedException;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
@@ -95,6 +83,17 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
+import org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
/**
* Utility methods for interacting with the underlying file system.
@@ -707,17 +706,12 @@ public abstract class FSUtils extends CommonFSUtils {
/**
* Checks if meta region exists
- *
* @param fs file system
- * @param rootdir root directory of HBase installation
+ * @param rootDir root directory of HBase installation
* @return true if exists
- * @throws IOException e
*/
- @SuppressWarnings("deprecation")
- public static boolean metaRegionExists(FileSystem fs, Path rootdir)
- throws IOException {
- Path metaRegionDir =
- HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
+ public static boolean metaRegionExists(FileSystem fs, Path rootDir) throws IOException {
+ Path metaRegionDir = getRegionDirFromRootDir(rootDir, RegionInfoBuilder.FIRST_META_REGIONINFO);
return fs.exists(metaRegionDir);
}
@@ -1029,7 +1023,11 @@ public abstract class FSUtils extends CommonFSUtils {
return regionDirs;
}
- public static Path getRegionDir(Path tableDir, RegionInfo region) {
+ public static Path getRegionDirFromRootDir(Path rootDir, RegionInfo region) {
+ return getRegionDirFromTableDir(getTableDir(rootDir, region.getTable()), region);
+ }
+
+ public static Path getRegionDirFromTableDir(Path tableDir, RegionInfo region) {
return new Path(tableDir, ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName());
}
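The renamed helpers make the path composition explicit. A quick sketch of what getRegionDirFromRootDir resolves to, with hypothetical values (root dir hdfs://nn:8020/hbase, table "ns:tbl", encoded region name "abc123"):

    // getTableDir adds BASE_NAMESPACE_DIR, so this resolves to:
    //   hdfs://nn:8020/hbase/data/ns/tbl/abc123
    Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, regionInfo);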
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 8a245cd..6ee6030 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.CollectionUtils.IOExceptionSupplier;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
@@ -549,12 +550,25 @@ public class WALSplitter {
if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
return false;
}
- //Only default replica region can reach here, so we can use regioninfo
- //directly without converting it to default replica's regioninfo.
- Path regionDir = FSUtils.getWALRegionDir(conf, regionInfo.getTable(),
- regionInfo.getEncodedName());
- NavigableSet<Path> files = getSplitEditFilesSorted(FSUtils.getWALFileSystem(conf), regionDir);
- return files != null && !files.isEmpty();
+ // Only default replica region can reach here, so we can use regioninfo
+ // directly without converting it to default replica's regioninfo.
+ Path regionWALDir =
+ FSUtils.getWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
+ Path regionDir = FSUtils.getRegionDirFromRootDir(FSUtils.getRootDir(conf), regionInfo);
+ Path wrongRegionWALDir =
+ FSUtils.getWrongWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName());
+ FileSystem walFs = FSUtils.getWALFileSystem(conf);
+ FileSystem rootFs = FSUtils.getRootDirFileSystem(conf);
+ NavigableSet<Path> files = getSplitEditFilesSorted(walFs, regionWALDir);
+ if (!files.isEmpty()) {
+ return true;
+ }
+ files = getSplitEditFilesSorted(rootFs, regionDir);
+ if (!files.isEmpty()) {
+ return true;
+ }
+ files = getSplitEditFilesSorted(walFs, wrongRegionWALDir);
+ return !files.isEmpty();
}
@@ -706,6 +720,33 @@ public class WALSplitter {
}
/**
+ * This method checks 3 places for the max sequence id file: the expected place under the WAL
+ * directory, the old place (the region directory under the root directory), and the wrong
+ * directory we introduced in HBASE-20734. See HBASE-22617 for more details.
+ * <p/>
+ * Notice that you should always call this method instead of
+ * {@link #getMaxRegionSequenceId(FileSystem, Path)} until the 4.0.0 release.
+ * @deprecated Only for compatibility, will be removed in 4.0.0.
+ */
+ @Deprecated
+ public static long getMaxRegionSequenceId(Configuration conf, RegionInfo region,
+ IOExceptionSupplier<FileSystem> rootFsSupplier, IOExceptionSupplier<FileSystem> walFsSupplier)
+ throws IOException {
+ FileSystem rootFs = rootFsSupplier.get();
+ FileSystem walFs = walFsSupplier.get();
+ Path regionWALDir = FSUtils.getWALRegionDir(conf, region.getTable(), region.getEncodedName());
+ // This is the old place where we store max sequence id file
+ Path regionDir = FSUtils.getRegionDirFromRootDir(FSUtils.getRootDir(conf), region);
+ // This is for HBASE-20734, where we used the wrong directory, see HBASE-22617 for more details.
+ Path wrongRegionWALDir =
+ FSUtils.getWrongWALRegionDir(conf, region.getTable(), region.getEncodedName());
+ long maxSeqId = getMaxRegionSequenceId(walFs, regionWALDir);
+ maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(rootFs, regionDir));
+ maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(walFs, wrongRegionWALDir));
+ return maxSeqId;
+ }
+
+ /**
* Create a new {@link Reader} for reading logs to split.
*
* @param file
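The new overload takes IOExceptionSupplier arguments so that the filesystem lookups, which may throw IOException, stay inside the method; the master-side procedures simply pass method references off MasterFileSystem. A usage sketch mirroring the merge/split procedure hunks above (env is a MasterProcedureEnv, region a RegionInfo):

    MasterFileSystem mfs = env.getMasterFileSystem();
    long maxSeqId = WALSplitter.getMaxRegionSequenceId(env.getMasterConfiguration(),
      region, mfs::getFileSystem, mfs::getWALFileSystem);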
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index 20dd184..4e8680f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -145,7 +145,7 @@ public class TestHFileArchiving {
// now attempt to depose the region
Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
- Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+ Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, region.getRegionInfo());
HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
@@ -196,7 +196,7 @@ public class TestHFileArchiving {
// make sure there are some files in the regiondir
Path rootDir = FSUtils.getRootDir(fs.getConf());
- Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+ Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, region.getRegionInfo());
FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
Assert.assertNotNull("No files in the region directory", regionFiles);
if (LOG.isDebugEnabled()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index a6fea37..26686c3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -224,7 +224,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
Path tempdir = mfs.getTempDir();
Path tableDir = FSUtils.getTableDir(tempdir, regionInfo.getTable());
- Path regionDir = FSUtils.getRegionDir(tableDir, regionInfo);
+ Path regionDir = FSUtils.getRegionDirFromTableDir(tableDir, regionInfo);
FileSystem fs = FileSystem.get(conf);
fs.mkdirs(regionDir);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
index 7fa2a9e..0253531 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
@@ -274,7 +274,7 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase {
MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
Path tempdir = mfs.getTempDir();
Path tableDir = FSUtils.getTableDir(tempdir, regionInfo.getTable());
- Path regionDir = FSUtils.getRegionDir(tableDir, regionInfo);
+ Path regionDir = FSUtils.getRegionDirFromTableDir(tableDir, regionInfo);
FileSystem fs = FileSystem.get(conf);
fs.mkdirs(regionDir);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
index 11f8f59..db19ebe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
@@ -132,7 +132,7 @@ public class TestRecoveredEdits {
// There should be no store files.
assertTrue(storeFiles.isEmpty());
region.close();
- Path regionDir = region.getRegionDir(hbaseRootDir, hri);
+ Path regionDir = FSUtils.getRegionDirFromRootDir(hbaseRootDir, hri);
Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
// This is a little fragile getting this path to a file of 10M of edits.
Path recoveredEditsFile = new Path(