You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by zh...@apache.org on 2021/07/26 14:05:20 UTC

[hbase] branch HBASE-24950 updated (82f3aa4 -> b6c54e5)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a change to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git.


 discard 82f3aa4  HBASE-25013 Avoid reset the backup master root cache every time when syncing (#2392)
 discard ca62baf  HBASE-24929 Introduce a special CellComparator for master local region (#2378)
 discard 75949d5  HBASE-24607 Implement CatalogJanitor for 'root table' (#2377)
 discard 44050c3  HBASE-24606 Implement meta merge (#2311)
 discard cf66204  HBASE-24391 Implement meta split (#2010)
 discard 84f3692  HBASE-24459 Move the locateMeta logic from AsyncMetaRegionTableLocator to ConnectionRegistry (#2095)
 discard b0a6e61  HBASE-24390 Remove RegionInfoBuilder.FIRST_META_REGIONINFO (#1877)
 discard 12bff02  HBASE-24389 Introduce new master rpc methods to locate meta region through root region (#1774)
 discard 4de5872  HBASE-24388 Store the locations of meta regions in master local store (#1746)
     add 9e27de6  HBASE-24734 RegionInfo#containsRange should support check meta table (#3496)
     add 3c70bc1  HBASE-26107 MOB compaction with missing files catches incorrect exception (#3511)
     add 0f787af  HBASE-26110: Add download links for 1.7.1 (#3514)
     add be2c97e  HBASE-25521 Change ChoreService and ScheduledChore to IA.Private (#3505)
     add d15f3cb  HBASE-26108 add option to disable scanMetrics in TableSnapshotInputFormat (#3516)
     add 51ed95c  HBASE-26071: Document HBASE-26021 and upgrade considerations for 1.7.0/1.7.1 (#3469)
     add 0294c73  HBASE-26093 Replication is stuck due to zero length wal file in oldWALs directory (#3504)
     add 8ae3942  HBASE-26091 Remove FirstKeyValueMatchingQualifiersFilter (#3497)
     add f0324a7  HBASE-26119 Polish TestAsyncNonMetaRegionLocator (#3526)
     add c74366c  HBASE-26049 Remove DfsBuilderUtility (#3444)
     add 4a3c7d7  HBASE-21946 Use ByteBuffer pread instead of byte[] pread in HFileBlock when applicable (#3434)
     add 02d263e  HBASE-26118 The HStore.commitFile and HStore.moveFileIntoPlace almost have the same logic (#3525)
     new 13b44d0  HBASE-24388 Store the locations of meta regions in master local store (#1746)
     new 48be0e0  HBASE-24389 Introduce new master rpc methods to locate meta region through root region (#1774)
     new 9959293  HBASE-24390 Remove RegionInfoBuilder.FIRST_META_REGIONINFO (#1877)
     new cf0fcca  HBASE-24459 Move the locateMeta logic from AsyncMetaRegionTableLocator to ConnectionRegistry (#2095)
     new 981d9d0  HBASE-24391 Implement meta split (#2010)
     new d96c610  HBASE-24606 Implement meta merge (#2311)
     new 75ac29c  HBASE-24607 Implement CatalogJanitor for 'root table' (#2377)
     new 2cc9cb9  HBASE-24929 Introduce a special CellComparator for master local region (#2378)
     new b6c54e5  HBASE-25013 Avoid reset the backup master root cache every time when syncing (#2392)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (82f3aa4)
            \
             N -- N -- N   refs/heads/HBASE-24950 (b6c54e5)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 9 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/hbase/client/MutableRegionInfo.java     |  14 ++-
 .../FirstKeyValueMatchingQualifiersFilter.java     | 106 +---------------
 .../hadoop/hbase/client/TestRegionInfoBuilder.java |  34 ++++++
 .../org/apache/hadoop/hbase/CellComparator.java    |  12 ++
 .../java/org/apache/hadoop/hbase/ChoreService.java |   2 +-
 .../apache/hadoop/hbase/MetaCellComparator.java    |   5 +
 .../org/apache/hadoop/hbase/ScheduledChore.java    |   2 +-
 .../apache/hadoop/hbase/io/util/BlockIOUtils.java  |  82 ++++++++++++-
 .../apache/hadoop/hbase/util/CommonFSUtils.java    |  74 ------------
 .../mapreduce/TableSnapshotInputFormatImpl.java    |  13 +-
 hbase-procedure/pom.xml                            |   4 +
 .../procedure2/store/wal/WALProcedureStore.java    |  10 +-
 .../src/main/protobuf/client/Filter.proto          |   2 +
 .../hadoop/hbase/mob/DefaultMobStoreCompactor.java |   8 +-
 .../apache/hadoop/hbase/regionserver/HStore.java   | 134 ++++++++++-----------
 .../hbase/regionserver/wal/ProtobufLogWriter.java  |  17 ++-
 .../regionserver/ReplicationSourceWALReader.java   |  15 ++-
 .../replication/regionserver/WALEntryStream.java   |  35 +-----
 .../hadoop/hbase/wal/AbstractFSWALProvider.java    |  38 +++++-
 .../hbase/TestPartialResultsFromClientSide.java    |  10 +-
 .../client/TestAsyncNonMetaRegionLocator.java      |  81 +++++++------
 .../hbase/filter/TestFilterSerialization.java      |  20 ---
 .../TestFirstKeyValueMatchingQualifiersFilter.java |  81 -------------
 .../hadoop/hbase/io/hfile/TestBlockIOUtils.java    |  94 +++++++++++++++
 .../hadoop/hbase/mob/FaultyMobStoreCompactor.java  |  10 +-
 .../hadoop/hbase/regionserver/TestCompaction.java  |  37 ++----
 .../hadoop/hbase/regionserver/TestRegionInfo.java  |  34 ++++++
 .../regionserver/TestBasicWALEntryStream.java      |  46 +++++++
 src/main/asciidoc/_chapters/upgrading.adoc         |  22 ++++
 src/site/xdoc/downloads.xml                        |  14 +--
 30 files changed, 575 insertions(+), 481 deletions(-)
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java

[hbase] 04/09: HBASE-24459 Move the locateMeta logic from AsyncMetaRegionTableLocator to ConnectionRegistry (#2095)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit cf0fcca58f4d6fdfd3bffbfe26820e4d1336c5bd
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Aug 25 21:14:28 2020 +0800

    HBASE-24459 Move the locateMeta logic from AsyncMetaRegionTableLocator to ConnectionRegistry (#2095)
    
    Signed-off-by: Viraj Jasani <vj...@apache.org>
    Signed-off-by: Bharath Vissapragada <bh...@apache.org>
---
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |  27 +--
 .../hbase/client/AsyncMetaTableRegionLocator.java  | 109 +--------
 .../hadoop/hbase/client/ConnectionRegistry.java    |  21 +-
 .../hadoop/hbase/client/ConnectionUtils.java       | 247 +++++++++++++++-----
 .../apache/hadoop/hbase/client/MasterRegistry.java |  41 +++-
 .../hbase/client/TableRegionLocationCache.java     |  64 +-----
 .../hadoop/hbase/client/ZKConnectionRegistry.java  |  77 ++++++-
 .../hbase/client/DoNothingConnectionRegistry.java  |  15 ++
 .../apache/hadoop/hbase/MetaCellComparator.java    |   5 +-
 .../hbase/client/AsyncClusterConnection.java       |  10 +-
 .../hbase/client/AsyncClusterConnectionImpl.java   |  30 ++-
 .../org/apache/hadoop/hbase/master/HMaster.java    |  63 ++++--
 .../hadoop/hbase/master/MasterRpcServices.java     |  66 ++++--
 .../hadoop/hbase/master/MetaLocationCache.java     | 160 +++++++++++++
 .../hbase/master/MetaRegionLocationCache.java      | 252 ---------------------
 .../hadoop/hbase/regionserver/HRegionServer.java   |   2 +-
 .../hbase/client/DummyAsyncClusterConnection.java  |   6 +
 .../hbase/client/DummyConnectionRegistry.java      |  16 +-
 .../hadoop/hbase/client/TestMasterRegistry.java    |  27 ---
 .../hbase/client/TestMetaRegionLocationCache.java  | 202 -----------------
 .../hbase/client/TestZKConnectionRegistry.java     |   2 +-
 .../hbase/master/TestClientMetaServiceRPCs.java    | 135 ++++++++---
 .../hadoop/hbase/master/TestMetaLocationCache.java | 178 +++++++++++++++
 .../TestRegionAssignedToMultipleRegionServers.java |   1 -
 ...tReportRegionStateTransitionFromDeadServer.java |   1 -
 .../master/assignment/TestSCPGetRegionsRace.java   |   1 -
 .../assignment/TestWakeUpUnexpectedProcedure.java  |   1 -
 27 files changed, 940 insertions(+), 819 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 5c24d98..1be281f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -44,7 +44,6 @@ import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
@@ -275,10 +274,6 @@ class AsyncConnectionImpl implements AsyncConnection {
       () -> createRegionServerStub(serverName));
   }
 
-  private MasterService.Interface createMasterStub(ServerName serverName) throws IOException {
-    return MasterService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout));
-  }
-
   private AdminService.Interface createAdminServerStub(ServerName serverName) throws IOException {
     return AdminService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout));
   }
@@ -290,26 +285,8 @@ class AsyncConnectionImpl implements AsyncConnection {
   }
 
   CompletableFuture<MasterService.Interface> getMasterStub() {
-    return ConnectionUtils.getOrFetch(masterStub, masterStubMakeFuture, false, () -> {
-      CompletableFuture<MasterService.Interface> future = new CompletableFuture<>();
-      addListener(registry.getActiveMaster(), (addr, error) -> {
-        if (error != null) {
-          future.completeExceptionally(error);
-        } else if (addr == null) {
-          future.completeExceptionally(new MasterNotRunningException(
-            "ZooKeeper available but no active master location found"));
-        } else {
-          LOG.debug("The fetched master address is {}", addr);
-          try {
-            future.complete(createMasterStub(addr));
-          } catch (IOException e) {
-            future.completeExceptionally(e);
-          }
-        }
-
-      });
-      return future;
-    }, stub -> true, "master stub");
+    return ConnectionUtils.getMasterStub(registry, masterStub, masterStubMakeFuture, rpcClient,
+      user, rpcTimeout, TimeUnit.MILLISECONDS, MasterService::newStub, "MasterService");
   }
 
   String getClusterId() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaTableRegionLocator.java
index c8ffa24..a1ccb8c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaTableRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaTableRegionLocator.java
@@ -19,32 +19,12 @@ package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
-import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaCellComparator;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService.Interface;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionRequest;
 
 /**
  * The class for locating region for meta table.
@@ -52,104 +32,27 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMeta
 @InterfaceAudience.Private
 class AsyncMetaTableRegionLocator extends AbstractAsyncTableRegionLocator {
 
-  private static final Logger LOG = LoggerFactory.getLogger(AsyncMetaTableRegionLocator.class);
-
-  private final AtomicReference<Interface> stub = new AtomicReference<>();
-
-  private final AtomicReference<CompletableFuture<Interface>> stubMakeFuture =
-    new AtomicReference<>();
-
   AsyncMetaTableRegionLocator(AsyncConnectionImpl conn, TableName tableName, int maxConcurrent) {
     // for meta region we should use MetaCellComparator to compare the row keys
-    super(conn, tableName, maxConcurrent, (r1, r2) -> MetaCellComparator
-      .compareRows(r1, 0, r1.length, r2, 0, r2.length));
-  }
-
-  private Interface createStub(ServerName serverName) throws IOException {
-    return ClientMetaService.newStub(conn.rpcClient.createRpcChannel(serverName, conn.user,
-      (int) TimeUnit.NANOSECONDS.toMillis(conn.connConf.getReadRpcTimeoutNs())));
-  }
-
-  CompletableFuture<Interface> getStub() {
-    return ConnectionUtils.getOrFetch(stub, stubMakeFuture, false, () -> {
-      CompletableFuture<Interface> future = new CompletableFuture<>();
-      addListener(conn.registry.getActiveMaster(), (addr, error) -> {
-        if (error != null) {
-          future.completeExceptionally(error);
-        } else if (addr == null) {
-          future.completeExceptionally(new MasterNotRunningException(
-            "ZooKeeper available but no active master location found"));
-        } else {
-          LOG.debug("The fetched master address is {}", addr);
-          try {
-            future.complete(createStub(addr));
-          } catch (IOException e) {
-            future.completeExceptionally(e);
-          }
-        }
-
-      });
-      return future;
-    }, stub -> true, "ClientLocateMetaStub");
-  }
-
-  private void tryClearMasterStubCache(IOException error, Interface currentStub) {
-    if (ClientExceptionsUtil.isConnectionException(error) ||
-      error instanceof ServerNotRunningYetException) {
-      stub.compareAndSet(currentStub, null);
-    }
+    super(conn, tableName, maxConcurrent, MetaCellComparator.ROW_COMPARATOR);
   }
 
   @Override
   protected void locate(LocateRequest req) {
-    addListener(getStub(), (stub, error) -> {
+    addListener(conn.registry.locateMeta(req.row, req.locateType), (locs, error) -> {
       if (error != null) {
         onLocateComplete(req, null, error);
         return;
       }
-      HBaseRpcController controller = conn.rpcControllerFactory.newController();
-      stub.locateMetaRegion(controller,
-        LocateMetaRegionRequest.newBuilder().setRow(ByteString.copyFrom(req.row))
-          .setLocateType(ProtobufUtil.toProtoRegionLocateType(req.locateType)).build(),
-        resp -> {
-          if (controller.failed()) {
-            IOException ex = controller.getFailed();
-            tryClearMasterStubCache(ex, stub);
-            onLocateComplete(req, null, ex);
-            return;
-          }
-          RegionLocations locs = new RegionLocations(resp.getMetaLocationsList().stream()
-            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList()));
-          if (validateRegionLocations(locs, req)) {
-            onLocateComplete(req, locs, null);
-          }
-        });
+      if (validateRegionLocations(locs, req)) {
+        onLocateComplete(req, locs, null);
+      }
     });
   }
 
   @Override
   CompletableFuture<List<HRegionLocation>>
     getAllRegionLocations(boolean excludeOfflinedSplitParents) {
-    CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
-    addListener(getStub(), (stub, error) -> {
-      if (error != null) {
-        future.completeExceptionally(error);
-        return;
-      }
-      HBaseRpcController controller = conn.rpcControllerFactory.newController();
-      stub.getAllMetaRegionLocations(controller, GetAllMetaRegionLocationsRequest.newBuilder()
-        .setExcludeOfflinedSplitParents(excludeOfflinedSplitParents).build(), resp -> {
-          if (controller.failed()) {
-            IOException ex = controller.getFailed();
-            tryClearMasterStubCache(ex, stub);
-            future.completeExceptionally(ex);
-            return;
-          }
-          List<HRegionLocation> locs = resp.getMetaLocationsList().stream()
-            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList());
-          future.complete(locs);
-        });
-    });
-    return future;
+    return conn.registry.getAllMetaRegionLocations(excludeOfflinedSplitParents);
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
index 569d728..1222268 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
@@ -18,21 +18,36 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.Closeable;
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Registry for meta information needed for connection setup to a HBase cluster. Implementations
- * hold cluster information such as this cluster's id, location of hbase:meta, etc..
- * Internal use only.
+ * hold cluster information such as this cluster's id, location of hbase:meta, etc.. Internal use
+ * only.
  */
 @InterfaceAudience.Private
 interface ConnectionRegistry extends Closeable {
 
   /**
+   * Get location of meta region for the given {@code row}.
+   */
+  CompletableFuture<RegionLocations> locateMeta(byte[] row, RegionLocateType locateType);
+
+  /**
+   * Get all meta region locations, including the location of secondary regions.
+   * @param excludeOfflinedSplitParents whether to include split parent.
+   */
+  CompletableFuture<List<HRegionLocation>>
+    getAllMetaRegionLocations(boolean excludeOfflinedSplitParents);
+
+  /**
    * Should only be called once.
-   * <p>
+   * <p/>
    * The upper layer should store this value somewhere as it will not be change any more.
    */
   CompletableFuture<String> getClusterId();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index d74e4aa..1433c55 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -24,11 +24,11 @@ import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
@@ -36,20 +36,26 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
-import java.util.function.Predicate;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.ipc.RemoteException;
@@ -60,6 +66,7 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 import org.apache.hbase.thirdparty.io.netty.util.Timer;
@@ -69,6 +76,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsRequest;
 
 /**
  * Utility used by client connections.
@@ -115,7 +124,7 @@ public final class ConnectionUtils {
    * @param log Used to log what we set in here.
    */
   public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
-      final Logger log) {
+    final Logger log) {
     // TODO: Fix this. Not all connections from server side should have 10 times the retries.
     int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
@@ -180,11 +189,18 @@ public final class ConnectionUtils {
     return Bytes.equals(row, EMPTY_END_ROW);
   }
 
+  private static int nanosToMillis(long nanos) {
+    return toIntNoOverflow(TimeUnit.NANOSECONDS.toMillis(nanos));
+  }
+
+  private static int toIntNoOverflow(long value) {
+    return (int) Math.min(Integer.MAX_VALUE, value);
+  }
+
   static void resetController(HBaseRpcController controller, long timeoutNs, int priority) {
     controller.reset();
     if (timeoutNs >= 0) {
-      controller.setCallTimeout(
-        (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(timeoutNs)));
+      controller.setCallTimeout(nanosToMillis(timeoutNs));
     }
     controller.setPriority(priority);
   }
@@ -341,7 +357,7 @@ public final class ConnectionUtils {
   }
 
   static void updateResultsMetrics(ScanMetrics scanMetrics, Result[] rrs,
-      boolean isRegionServerRemote) {
+    boolean isRegionServerRemote) {
     if (scanMetrics == null || rrs == null || rrs.length == 0) {
       return;
     }
@@ -384,7 +400,7 @@ public final class ConnectionUtils {
    * increase the hedge read related metrics.
    */
   private static <T> void connect(CompletableFuture<T> srcFuture, CompletableFuture<T> dstFuture,
-      Optional<MetricsConnection> metrics) {
+    Optional<MetricsConnection> metrics) {
     addListener(srcFuture, (r, e) -> {
       if (e != null) {
         dstFuture.completeExceptionally(e);
@@ -403,8 +419,8 @@ public final class ConnectionUtils {
   }
 
   private static <T> void sendRequestsToSecondaryReplicas(
-      Function<Integer, CompletableFuture<T>> requestReplica, RegionLocations locs,
-      CompletableFuture<T> future, Optional<MetricsConnection> metrics) {
+    Function<Integer, CompletableFuture<T>> requestReplica, RegionLocations locs,
+    CompletableFuture<T> future, Optional<MetricsConnection> metrics) {
     if (future.isDone()) {
       // do not send requests to secondary replicas if the future is done, i.e, the primary request
       // has already been finished.
@@ -418,9 +434,9 @@ public final class ConnectionUtils {
   }
 
   static <T> CompletableFuture<T> timelineConsistentRead(AsyncRegionLocator locator,
-      TableName tableName, Query query, byte[] row, RegionLocateType locateType,
-      Function<Integer, CompletableFuture<T>> requestReplica, long rpcTimeoutNs,
-      long primaryCallTimeoutNs, Timer retryTimer, Optional<MetricsConnection> metrics) {
+    TableName tableName, Query query, byte[] row, RegionLocateType locateType,
+    Function<Integer, CompletableFuture<T>> requestReplica, long rpcTimeoutNs,
+    long primaryCallTimeoutNs, Timer retryTimer, Optional<MetricsConnection> metrics) {
     if (query.getConsistency() != Consistency.TIMELINE) {
       return requestReplica.apply(RegionReplicaUtil.DEFAULT_REPLICA_ID);
     }
@@ -515,52 +531,8 @@ public final class ConnectionUtils {
     }
   }
 
-  static <T> CompletableFuture<T> getOrFetch(AtomicReference<T> cacheRef,
-      AtomicReference<CompletableFuture<T>> futureRef, boolean reload,
-      Supplier<CompletableFuture<T>> fetch, Predicate<T> validator, String type) {
-    for (;;) {
-      if (!reload) {
-        T value = cacheRef.get();
-        if (value != null && validator.test(value)) {
-          return CompletableFuture.completedFuture(value);
-        }
-      }
-      LOG.trace("{} cache is null, try fetching from registry", type);
-      if (futureRef.compareAndSet(null, new CompletableFuture<>())) {
-        LOG.debug("Start fetching {} from registry", type);
-        CompletableFuture<T> future = futureRef.get();
-        addListener(fetch.get(), (value, error) -> {
-          if (error != null) {
-            LOG.debug("Failed to fetch {} from registry", type, error);
-            futureRef.getAndSet(null).completeExceptionally(error);
-            return;
-          }
-          LOG.debug("The fetched {} is {}", type, value);
-          // Here we update cache before reset future, so it is possible that someone can get a
-          // stale value. Consider this:
-          // 1. update cacheRef
-          // 2. someone clears the cache and relocates again
-          // 3. the futureRef is not null so the old future is used.
-          // 4. we clear futureRef and complete the future in it with the value being
-          // cleared in step 2.
-          // But we do not think it is a big deal as it rarely happens, and even if it happens, the
-          // caller will retry again later, no correctness problems.
-          cacheRef.set(value);
-          futureRef.set(null);
-          future.complete(value);
-        });
-        return future;
-      } else {
-        CompletableFuture<T> future = futureRef.get();
-        if (future != null) {
-          return future;
-        }
-      }
-    }
-  }
-
   static void updateStats(Optional<ServerStatisticTracker> optStats,
-      Optional<MetricsConnection> optMetrics, ServerName serverName, MultiResponse resp) {
+    Optional<MetricsConnection> optMetrics, ServerName serverName, MultiResponse resp) {
     if (!optStats.isPresent() && !optMetrics.isPresent()) {
       // ServerStatisticTracker and MetricsConnection are both not present, just return
       return;
@@ -588,13 +560,13 @@ public final class ConnectionUtils {
   @FunctionalInterface
   interface RpcCall<RESP, REQ> {
     void call(ClientService.Interface stub, HBaseRpcController controller, REQ req,
-        RpcCallback<RESP> done);
+      RpcCallback<RESP> done);
   }
 
   static <REQ, PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
-      HRegionLocation loc, ClientService.Interface stub, REQ req,
-      Converter<PREQ, byte[], REQ> reqConvert, RpcCall<PRESP, PREQ> rpcCall,
-      Converter<RESP, HBaseRpcController, PRESP> respConverter) {
+    HRegionLocation loc, ClientService.Interface stub, REQ req,
+    Converter<PREQ, byte[], REQ> reqConvert, RpcCall<PRESP, PREQ> rpcCall,
+    Converter<RESP, HBaseRpcController, PRESP> respConverter) {
     CompletableFuture<RESP> future = new CompletableFuture<>();
     try {
       rpcCall.call(stub, controller, reqConvert.convert(loc.getRegion().getRegionName(), req),
@@ -646,4 +618,155 @@ public final class ConnectionUtils {
       controller.setFailed(error.toString());
     }
   }
+
+  public static RegionLocations locateRow(NavigableMap<byte[], RegionLocations> cache,
+    TableName tableName, byte[] row, int replicaId) {
+    Map.Entry<byte[], RegionLocations> entry = cache.floorEntry(row);
+    if (entry == null) {
+      return null;
+    }
+    RegionLocations locs = entry.getValue();
+    HRegionLocation loc = locs.getRegionLocation(replicaId);
+    if (loc == null) {
+      return null;
+    }
+    byte[] endKey = loc.getRegion().getEndKey();
+    if (isEmptyStopRow(endKey) || Bytes.compareTo(row, endKey) < 0) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
+          Bytes.toStringBinary(row), RegionLocateType.CURRENT, replicaId);
+      }
+      return locs;
+    } else {
+      return null;
+    }
+  }
+
+  public static RegionLocations locateRowBefore(NavigableMap<byte[], RegionLocations> cache,
+    TableName tableName, byte[] row, int replicaId) {
+    boolean isEmptyStopRow = isEmptyStopRow(row);
+    Map.Entry<byte[], RegionLocations> entry =
+      isEmptyStopRow ? cache.lastEntry() : cache.lowerEntry(row);
+    if (entry == null) {
+      return null;
+    }
+    RegionLocations locs = entry.getValue();
+    HRegionLocation loc = locs.getRegionLocation(replicaId);
+    if (loc == null) {
+      return null;
+    }
+    if (isEmptyStopRow(loc.getRegion().getEndKey()) ||
+      (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
+          Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId);
+      }
+      return locs;
+    } else {
+      return null;
+    }
+  }
+
+  public static void tryClearMasterStubCache(IOException error,
+    ClientMetaService.Interface currentStub, AtomicReference<ClientMetaService.Interface> stub) {
+    if (ClientExceptionsUtil.isConnectionException(error) ||
+      error instanceof ServerNotRunningYetException) {
+      stub.compareAndSet(currentStub, null);
+    }
+  }
+
+  public static <T> CompletableFuture<T> getMasterStub(ConnectionRegistry registry,
+    AtomicReference<T> stub, AtomicReference<CompletableFuture<T>> stubMakeFuture,
+    RpcClient rpcClient, User user, long rpcTimeout, TimeUnit unit,
+    Function<RpcChannel, T> stubMaker, String type) {
+    return getOrFetch(stub, stubMakeFuture, () -> {
+      CompletableFuture<T> future = new CompletableFuture<>();
+      addListener(registry.getActiveMaster(), (addr, error) -> {
+        if (error != null) {
+          future.completeExceptionally(error);
+        } else if (addr == null) {
+          future.completeExceptionally(new MasterNotRunningException(
+            "ZooKeeper available but no active master location found"));
+        } else {
+          LOG.debug("The fetched master address is {}", addr);
+          future.complete(stubMaker.apply(
+            rpcClient.createRpcChannel(addr, user, toIntNoOverflow(unit.toMillis(rpcTimeout)))));
+        }
+      });
+      return future;
+    }, type);
+  }
+
+  private static <T> CompletableFuture<T> getOrFetch(AtomicReference<T> cachedRef,
+    AtomicReference<CompletableFuture<T>> futureRef, Supplier<CompletableFuture<T>> fetch,
+    String type) {
+    for (;;) {
+      T cachedValue = cachedRef.get();
+      if (cachedValue != null) {
+        return CompletableFuture.completedFuture(cachedValue);
+      }
+      LOG.trace("{} cache is null, try fetching from registry", type);
+      if (futureRef.compareAndSet(null, new CompletableFuture<>())) {
+        LOG.debug("Start fetching {} from registry", type);
+        CompletableFuture<T> future = futureRef.get();
+        addListener(fetch.get(), (value, error) -> {
+          if (error != null) {
+            LOG.debug("Failed to fetch {} from registry", type, error);
+            futureRef.getAndSet(null).completeExceptionally(error);
+            return;
+          }
+          LOG.debug("The fetched {} is {}", type, value);
+          // Here we update cache before reset future, so it is possible that someone can get a
+          // stale value. Consider this:
+          // 1. update cacheRef
+          // 2. someone clears the cache and relocates again
+          // 3. the futureRef is not null so the old future is used.
+          // 4. we clear futureRef and complete the future in it with the value being
+          // cleared in step 2.
+          // But we do not think it is a big deal as it rarely happens, and even if it happens, the
+          // caller will retry again later, no correctness problems.
+          cachedRef.set(value);
+          futureRef.set(null);
+          future.complete(value);
+        });
+        return future;
+      } else {
+        CompletableFuture<T> future = futureRef.get();
+        if (future != null) {
+          return future;
+        }
+      }
+    }
+  }
+
+  public static CompletableFuture<List<HRegionLocation>> getAllMetaRegionLocations(
+    boolean excludeOfflinedSplitParents,
+    CompletableFuture<ClientMetaService.Interface> getStubFuture,
+    AtomicReference<ClientMetaService.Interface> stubRef,
+    RpcControllerFactory rpcControllerFactory, int callTimeoutMs) {
+    CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
+    addListener(getStubFuture, (stub, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      HBaseRpcController controller = rpcControllerFactory.newController();
+      if (callTimeoutMs > 0) {
+        controller.setCallTimeout(callTimeoutMs);
+      }
+      stub.getAllMetaRegionLocations(controller, GetAllMetaRegionLocationsRequest.newBuilder()
+        .setExcludeOfflinedSplitParents(excludeOfflinedSplitParents).build(), resp -> {
+          if (controller.failed()) {
+            IOException ex = controller.getFailed();
+            tryClearMasterStubCache(ex, stub, stubRef);
+            future.completeExceptionally(ex);
+            return;
+          }
+          List<HRegionLocation> locs = resp.getMetaLocationsList().stream()
+            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList());
+          future.complete(locs);
+        });
+    });
+    return future;
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
index 6caa8d5..3475f74 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
@@ -55,11 +55,14 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.base.Strings;
 import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hbase.thirdparty.com.google.protobuf.Message;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMastersRequest;
@@ -67,6 +70,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMasters
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMastersResponseEntry;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionResponse;
 
 /**
  * Master based registry implementation. Makes RPCs to the configured master addresses from config
@@ -125,7 +130,7 @@ public class MasterRegistry implements ConnectionRegistry {
     rpcTimeoutMs = (int) Math.min(Integer.MAX_VALUE,
       conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
     // XXX: we pass cluster id as null here since we do not have a cluster id yet, we have to fetch
-    // this through the master registry...
+    // this through the connection registry...
     // This is a problem as we will use the cluster id to determine the authentication method
     rpcClient = RpcClientFactory.createClient(conf, null);
     rpcControllerFactory = RpcControllerFactory.instantiate(conf);
@@ -353,4 +358,36 @@ public class MasterRegistry implements ConnectionRegistry {
       }
     }, "MasterRegistry.close");
   }
-}
+
+  private RegionLocations transformRegionLocations(LocateMetaRegionResponse resp) {
+    return new RegionLocations(resp.getMetaLocationsList().stream()
+      .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList()));
+  }
+
+  @Override
+  public CompletableFuture<RegionLocations> locateMeta(byte[] row, RegionLocateType locateType) {
+    LocateMetaRegionRequest request =
+      LocateMetaRegionRequest.newBuilder().setRow(ByteString.copyFrom(row))
+        .setLocateType(ProtobufUtil.toProtoRegionLocateType(locateType)).build();
+    return this.<LocateMetaRegionResponse> call((c, s, d) -> s.locateMetaRegion(c, request, d),
+      r -> true, "locateMeta()").thenApply(this::transformRegionLocations);
+  }
+
+  private List<HRegionLocation>
+    transformRegionLocationList(GetAllMetaRegionLocationsResponse resp) {
+    return resp.getMetaLocationsList().stream().map(ProtobufUtil::toRegionLocation)
+      .collect(Collectors.toList());
+  }
+
+  @Override
+  public CompletableFuture<List<HRegionLocation>>
+    getAllMetaRegionLocations(boolean excludeOfflinedSplitParents) {
+    GetAllMetaRegionLocationsRequest request = GetAllMetaRegionLocationsRequest.newBuilder()
+      .setExcludeOfflinedSplitParents(excludeOfflinedSplitParents).build();
+    return this
+      .<GetAllMetaRegionLocationsResponse> call(
+        (c, s, d) -> s.getAllMetaRegionLocations(c, request, d), r -> true,
+        "getAllMetaRegionLocations(" + excludeOfflinedSplitParents + ")")
+      .thenApply(this::transformRegionLocationList);
+  }
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java
index 0745796..ed7cb90 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.hbase.client;
 import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.canUpdateOnError;
 import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isEqual;
 import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.removeRegionLocation;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.locateRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.locateRowBefore;
 
 import com.google.errorprone.annotations.RestrictedApi;
 import java.util.Comparator;
@@ -66,64 +67,17 @@ class TableRegionLocationCache {
     metrics.ifPresent(MetricsConnection::incrMetaCacheNumClearRegion);
   }
 
-  private RegionLocations locateRow(TableName tableName, byte[] row, int replicaId) {
-    Map.Entry<byte[], RegionLocations> entry = cache.floorEntry(row);
-    if (entry == null) {
-      recordCacheMiss();
-      return null;
-    }
-    RegionLocations locs = entry.getValue();
-    HRegionLocation loc = locs.getRegionLocation(replicaId);
-    if (loc == null) {
-      recordCacheMiss();
-      return null;
-    }
-    byte[] endKey = loc.getRegion().getEndKey();
-    if (isEmptyStopRow(endKey) || Bytes.compareTo(row, endKey) < 0) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
-          Bytes.toStringBinary(row), RegionLocateType.CURRENT, replicaId);
-      }
-      recordCacheHit();
-      return locs;
-    } else {
-      recordCacheMiss();
-      return null;
-    }
-  }
-
-  private RegionLocations locateRowBefore(TableName tableName, byte[] row, int replicaId) {
-    boolean isEmptyStopRow = isEmptyStopRow(row);
-    Map.Entry<byte[], RegionLocations> entry =
-      isEmptyStopRow ? cache.lastEntry() : cache.lowerEntry(row);
-    if (entry == null) {
-      recordCacheMiss();
-      return null;
-    }
-    RegionLocations locs = entry.getValue();
-    HRegionLocation loc = locs.getRegionLocation(replicaId);
-    if (loc == null) {
-      recordCacheMiss();
-      return null;
-    }
-    if (isEmptyStopRow(loc.getRegion().getEndKey()) ||
-      (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
-          Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId);
-      }
+  RegionLocations locate(TableName tableName, byte[] row, int replicaId,
+    RegionLocateType locateType) {
+    RegionLocations locs = locateType.equals(RegionLocateType.BEFORE) ?
+      locateRowBefore(cache, tableName, row, replicaId) :
+      locateRow(cache, tableName, row, replicaId);
+    if (locs != null) {
       recordCacheHit();
-      return locs;
     } else {
       recordCacheMiss();
-      return null;
     }
-  }
-
-  RegionLocations locate(TableName tableName, byte[] row, int replicaId,
-    RegionLocateType locateType) {
-    return locateType.equals(RegionLocateType.BEFORE) ? locateRowBefore(tableName, row, replicaId) :
-      locateRow(tableName, row, replicaId);
+    return locs;
   }
 
   // if we successfully add the locations to cache, return the locations, otherwise return the one
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
index c79e9d6..66f9684 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
+import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
 import static org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.lengthOfPBMagic;
 import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture;
@@ -26,6 +29,8 @@ import static org.apache.hadoop.hbase.zookeeper.ZKMetadata.removeMetaData;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
@@ -35,7 +40,12 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.RpcClientFactory;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
@@ -43,7 +53,12 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
 
 /**
@@ -58,9 +73,29 @@ class ZKConnectionRegistry implements ConnectionRegistry {
 
   private final ZNodePaths znodePaths;
 
-  ZKConnectionRegistry(Configuration conf) {
+  private final AtomicReference<ClientMetaService.Interface> cachedStub = new AtomicReference<>();
+
+  private final AtomicReference<CompletableFuture<ClientMetaService.Interface>> stubMakeFuture =
+    new AtomicReference<>();
+
+  // RPC client used to talk to the masters.
+  private final RpcClient rpcClient;
+  private final RpcControllerFactory rpcControllerFactory;
+  private final long readRpcTimeoutNs;
+  private final User user;
+
+  ZKConnectionRegistry(Configuration conf) throws IOException {
     this.znodePaths = new ZNodePaths(conf);
     this.zk = new ReadOnlyZKClient(conf);
+    // XXX: we pass cluster id as null here since we do not have a cluster id yet, we have to fetch
+    // this through the connection registry...
+    // This is a problem as we will use the cluster id to determine the authentication method
+    rpcClient = RpcClientFactory.createClient(conf, null);
+    rpcControllerFactory = RpcControllerFactory.instantiate(conf);
+    long rpcTimeoutMs = conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT);
+    this.readRpcTimeoutNs =
+      TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY, rpcTimeoutMs));
+    this.user = User.getCurrent();
   }
 
   private interface Converter<T> {
@@ -234,8 +269,48 @@ class ZKConnectionRegistry implements ConnectionRegistry {
       "ZKConnectionRegistry.getActiveMaster");
   }
 
+  private CompletableFuture<ClientMetaService.Interface> getStub() {
+    return ConnectionUtils.getMasterStub(this, cachedStub, stubMakeFuture, rpcClient, user,
+      readRpcTimeoutNs, TimeUnit.NANOSECONDS, ClientMetaService::newStub, "ClientMetaService");
+  }
+
+  @Override
+  public CompletableFuture<RegionLocations> locateMeta(byte[] row, RegionLocateType locateType) {
+    CompletableFuture<RegionLocations> future = new CompletableFuture<>();
+    addListener(getStub(), (stub, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      HBaseRpcController controller = rpcControllerFactory.newController();
+      stub.locateMetaRegion(controller,
+        LocateMetaRegionRequest.newBuilder().setRow(ByteString.copyFrom(row))
+          .setLocateType(ProtobufUtil.toProtoRegionLocateType(locateType)).build(),
+        resp -> {
+          if (controller.failed()) {
+            IOException ex = controller.getFailed();
+            ConnectionUtils.tryClearMasterStubCache(ex, stub, ZKConnectionRegistry.this.cachedStub);
+            future.completeExceptionally(ex);
+            return;
+          }
+          RegionLocations locs = new RegionLocations(resp.getMetaLocationsList().stream()
+            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList()));
+          future.complete(locs);
+        });
+    });
+    return future;
+  }
+
+  @Override
+  public CompletableFuture<List<HRegionLocation>>
+    getAllMetaRegionLocations(boolean excludeOfflinedSplitParents) {
+    return ConnectionUtils.getAllMetaRegionLocations(excludeOfflinedSplitParents, getStub(),
+      cachedStub, rpcControllerFactory, -1);
+  }
+
   @Override
   public void close() {
+    rpcClient.close();
     zk.close();
   }
 }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
index 64ded7f..68e9dd5 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.Collections;
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -44,4 +48,15 @@ class DoNothingConnectionRegistry implements ConnectionRegistry {
   @Override
   public void close() {
   }
+
+  @Override
+  public CompletableFuture<RegionLocations> locateMeta(byte[] row, RegionLocateType type) {
+    return CompletableFuture.completedFuture(null);
+  }
+
+  @Override
+  public CompletableFuture<List<HRegionLocation>>
+    getAllMetaRegionLocations(boolean excludeOfflinedSplitParents) {
+    return CompletableFuture.completedFuture(Collections.emptyList());
+  }
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java
index 783aed6..ce831fc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java
@@ -36,6 +36,9 @@ import org.apache.hbase.thirdparty.com.google.common.primitives.Longs;
 @InterfaceStability.Evolving
 public class MetaCellComparator extends CellComparatorImpl {
 
+  public static final Comparator<byte[]> ROW_COMPARATOR =
+    (r1, r2) -> compareRows(r1, 0, r1.length, r2, 0, r2.length);
+
   /**
    * A {@link MetaCellComparator} for <code>hbase:meta</code> catalog table
    * {@link KeyValue}s.
@@ -76,7 +79,7 @@ public class MetaCellComparator extends CellComparatorImpl {
     return ignoreSequenceid ? diff : Longs.compare(b.getSequenceId(), a.getSequenceId());
   }
 
-  public static int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset,
+  private static int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset,
       int rlength) {
     int leftDelimiter = Bytes.searchDelimiterIndex(left, loffset, llength, HConstants.DELIMITER);
     int rightDelimiter = Bytes.searchDelimiterIndex(right, roffset, rlength, HConstants.DELIMITER);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index 92118ac..8e64b4b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -61,13 +62,13 @@ public interface AsyncClusterConnection extends AsyncConnection {
    * original return value is useless.
    */
   CompletableFuture<Long> replay(TableName tableName, byte[] encodedRegionName, byte[] row,
-      List<Entry> entries, int replicaId, int numRetries, long operationTimeoutNs);
+    List<Entry> entries, int replicaId, int numRetries, long operationTimeoutNs);
 
   /**
    * Return all the replicas for a region. Used for region replica replication.
    */
   CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
-      boolean reload);
+    boolean reload);
 
   /**
    * Return the token for this bulk load.
@@ -98,4 +99,9 @@ public interface AsyncClusterConnection extends AsyncConnection {
    * Clean up after finishing bulk load, no matter success or not.
    */
   CompletableFuture<Void> cleanupBulkLoad(TableName tableName, String bulkToken);
+
+  /**
+   * Fetch all meta region locations from active master, used by backup masters for caching.
+   */
+  CompletableFuture<List<HRegionLocation>> getAllMetaRegionLocations(int callTimeoutMs);
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
index 39fc3a2..cfe62db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hbase.client;
 import java.net.SocketAddress;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -43,6 +46,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBul
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
 
 /**
  * The implementation of AsyncClusterConnection.
@@ -50,8 +54,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpeci
 @InterfaceAudience.Private
 class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection {
 
+  private final AtomicReference<ClientMetaService.Interface> cachedClientMetaStub =
+    new AtomicReference<>();
+
+  private final AtomicReference<CompletableFuture<ClientMetaService.Interface>>
+    clientMetaStubMakeFuture = new AtomicReference<>();
+
   public AsyncClusterConnectionImpl(Configuration conf, ConnectionRegistry registry,
-      String clusterId, SocketAddress localAddress, User user) {
+    String clusterId, SocketAddress localAddress, User user) {
     super(conf, registry, clusterId, localAddress, user);
   }
 
@@ -72,14 +82,14 @@ class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClu
 
   @Override
   public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
-      boolean writeFlushWALMarker) {
+    boolean writeFlushWALMarker) {
     RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin();
     return admin.flushRegionInternal(regionName, null, writeFlushWALMarker);
   }
 
   @Override
   public CompletableFuture<Long> replay(TableName tableName, byte[] encodedRegionName, byte[] row,
-      List<Entry> entries, int replicaId, int retries, long operationTimeoutNs) {
+    List<Entry> entries, int replicaId, int retries, long operationTimeoutNs) {
     return new AsyncRegionReplicaReplayRetryingCaller(RETRY_TIMER, this,
       ConnectionUtils.retries2Attempts(retries), operationTimeoutNs, tableName, encodedRegionName,
       row, entries, replicaId).call();
@@ -87,7 +97,7 @@ class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClu
 
   @Override
   public CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
-      boolean reload) {
+    boolean reload) {
     return getLocator().getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L);
   }
 
@@ -132,4 +142,16 @@ class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClu
           }, (s, c, req, done) -> s.cleanupBulkLoad(c, req, done), (c, resp) -> null))
       .call();
   }
+
+  private CompletableFuture<ClientMetaService.Interface> getClientMetaStub() {
+    return ConnectionUtils.getMasterStub(registry, cachedClientMetaStub, clientMetaStubMakeFuture,
+      rpcClient, user, rpcTimeout, TimeUnit.MILLISECONDS, ClientMetaService::newStub,
+      "ClientMetaService");
+  }
+
+  @Override
+  public CompletableFuture<List<HRegionLocation>> getAllMetaRegionLocations(int callTimeoutMs) {
+    return ConnectionUtils.getAllMetaRegionLocations(false, getClientMetaStub(),
+      cachedClientMetaStub, rpcControllerFactory, callTimeoutMs);
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 4e84960..79a3f7f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -237,6 +237,7 @@ import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Strings;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -324,12 +325,11 @@ public class HMaster extends HRegionServer implements MasterServices {
   // manager of assignment nodes in zookeeper
   private AssignmentManager assignmentManager;
 
-
   /**
    * Cache for the meta region replica's locations. Also tracks their changes to avoid stale
    * cache entries.
    */
-  private final MetaRegionLocationCache metaRegionLocationCache;
+  private volatile MetaLocationCache metaLocationCache;
 
   private RSGroupInfoManager rsGroupInfoManager;
 
@@ -497,7 +497,7 @@ public class HMaster extends HRegionServer implements MasterServices {
         }
       }
 
-      this.metaRegionLocationCache = new MetaRegionLocationCache(this.zooKeeper);
+      this.metaLocationCache = new MetaLocationCache(this);
       this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this);
 
       cachedClusterId = new CachedClusterId(this, conf);
@@ -528,6 +528,25 @@ public class HMaster extends HRegionServer implements MasterServices {
   @Override
   public void run() {
     try {
+      // we have to do this in a background thread as for a fresh new cluster, we need to become
+      // active master first to set the cluster id so we can initialize the cluster connection.
+      // for backup master, we need to use async cluster connection to connect to active master for
+      // fetching the content of root table, to serve the locate meta requests from client.
+      Threads.setDaemonThreadRunning(new Thread(() -> {
+        for (;;) {
+          try {
+            if (!Strings.isNullOrEmpty(ZKClusterId.readClusterIdZNode(zooKeeper))) {
+              setupClusterConnection();
+              break;
+            } else {
+              LOG.trace("cluster id is still null, waiting...");
+            }
+          } catch (Throwable t) {
+            // pass the cause to the logger, otherwise repeated failures here are
+            // impossible to diagnose from the log output
+            LOG.warn("failed to initialize cluster connection, retrying...", t);
+          }
+          Threads.sleep(1000);
+        }
+      }), getName() + ":initClusterConnection");
       Threads.setDaemonThreadRunning(new Thread(() -> {
         try {
           int infoPort = putUpJettyServer();
@@ -886,9 +905,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    */
   private void finishActiveMasterInitialization(MonitoredTask status) throws IOException,
           InterruptedException, KeeperException, ReplicationException {
-    /*
-     * We are active master now... go initialize components we need to run.
-     */
+    // We are active master now... go initialize components we need to run.
     status.setStatus("Initializing Master file system");
 
     this.masterActiveTime = EnvironmentEdgeManager.currentTime();
@@ -932,6 +949,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     status.setStatus("Initialize ServerManager and schedule SCP for crash servers");
     // The below two managers must be created before loading procedures, as they will be used during
     // loading.
+    setupClusterConnection();
     this.serverManager = createServerManager(this);
     this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
     if (!conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
@@ -944,6 +962,10 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     tryMigrateRootTableFromZooKeeper();
 
+    // stop meta location cache, as now we do not need sync from active master any more.
+    metaLocationCache.stop("we are active master now");
+    metaLocationCache = null;
+
     createProcedureExecutor();
     Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType =
       procedureExecutor.getActiveProceduresNoCopy().stream()
@@ -1384,16 +1406,11 @@ public class HMaster extends HRegionServer implements MasterServices {
   /**
    * <p>
    * Create a {@link ServerManager} instance.
-   * </p>
-   * <p>
+   * <p/>
    * Will be overridden in tests.
-   * </p>
    */
   @InterfaceAudience.Private
   protected ServerManager createServerManager(final MasterServices master) throws IOException {
-    // We put this out here in a method so can do a Mockito.spy and stub it out
-    // w/ a mocked up ServerManager.
-    setupClusterConnection();
     return new ServerManager(master);
   }
 
@@ -2261,16 +2278,14 @@ public class HMaster extends HRegionServer implements MasterServices {
   private void startActiveMasterManager(int infoPort) throws KeeperException {
     String backupZNode = ZNodePaths.joinZNode(
       zooKeeper.getZNodePaths().backupMasterAddressesZNode, serverName.toString());
-    /*
-    * Add a ZNode for ourselves in the backup master directory since we
-    * may not become the active master. If so, we want the actual active
-    * master to know we are backup masters, so that it won't assign
-    * regions to us if so configured.
-    *
-    * If we become the active master later, ActiveMasterManager will delete
-    * this node explicitly.  If we crash before then, ZooKeeper will delete
-    * this node for us since it is ephemeral.
-    */
+    /**
+     * Add a ZNode for ourselves in the backup master directory since we may not become the active
+     * master. If so, we want the actual active master to know we are backup masters, so that it
+     * won't assign regions to us if so configured.
+     * <p/>
+     * If we become the active master later, ActiveMasterManager will delete this node explicitly.
+     * If we crash before then, ZooKeeper will delete this node for us since it is ephemeral.
+     */
     LOG.info("Adding backup master ZNode " + backupZNode);
     if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName, infoPort)) {
       LOG.warn("Failed create of " + backupZNode + " by " + serverName);
@@ -3901,8 +3916,8 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
   }
 
-  public MetaRegionLocationCache getMetaRegionLocationCache() {
-    return this.metaRegionLocationCache;
+  public MetaLocationCache getMetaLocationCache() {
+    return this.metaLocationCache;
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 81ea0c1..aa7dfab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -3036,13 +3036,27 @@ public class MasterRpcServices extends RSRpcServices implements MasterService.Bl
 
   @Override
   public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController rpcController,
-      GetMetaRegionLocationsRequest request) throws ServiceException {
-    GetMetaRegionLocationsResponse.Builder response = GetMetaRegionLocationsResponse.newBuilder();
-    Optional<List<HRegionLocation>> metaLocations =
-        master.getMetaRegionLocationCache().getMetaRegionLocations();
-    metaLocations.ifPresent(hRegionLocations -> hRegionLocations.forEach(
-      location -> response.addMetaLocations(ProtobufUtil.toRegionLocation(location))));
-    return response.build();
+    GetMetaRegionLocationsRequest request) throws ServiceException {
+    MetaLocationCache cache = master.getMetaLocationCache();
+    RegionLocations locs;
+    try {
+      if (cache != null) {
+        locs = cache.locateMeta(HConstants.EMPTY_BYTE_ARRAY, RegionLocateType.CURRENT);
+      } else {
+        locs = master.locateMeta(HConstants.EMPTY_BYTE_ARRAY, RegionLocateType.CURRENT);
+      }
+      GetMetaRegionLocationsResponse.Builder builder = GetMetaRegionLocationsResponse.newBuilder();
+      if (locs != null) {
+        for (HRegionLocation loc : locs) {
+          if (loc != null) {
+            builder.addMetaLocations(ProtobufUtil.toRegionLocation(loc));
+          }
+        }
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
   }
 
   @Override
@@ -3459,11 +3473,16 @@ public class MasterRpcServices extends RSRpcServices implements MasterService.Bl
     byte[] row = request.getRow().toByteArray();
     RegionLocateType locateType = ProtobufUtil.toRegionLocateType(request.getLocateType());
     try {
-      master.checkServiceStarted();
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preLocateMetaRegion(row, locateType);
       }
-      RegionLocations locs = master.locateMeta(row, locateType);
+      MetaLocationCache cache = master.getMetaLocationCache();
+      RegionLocations locs;
+      if (cache != null) {
+        locs = cache.locateMeta(row, locateType);
+      } else {
+        locs = master.locateMeta(row, locateType);
+      }
       List<HRegionLocation> list = new ArrayList<>();
       LocateMetaRegionResponse.Builder builder = LocateMetaRegionResponse.newBuilder();
       if (locs != null) {
@@ -3488,24 +3507,31 @@ public class MasterRpcServices extends RSRpcServices implements MasterService.Bl
     GetAllMetaRegionLocationsRequest request) throws ServiceException {
     boolean excludeOfflinedSplitParents = request.getExcludeOfflinedSplitParents();
     try {
-      master.checkServiceStarted();
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preGetAllMetaRegionLocations(excludeOfflinedSplitParents);
       }
-      List<RegionLocations> locs = master.getAllMetaRegionLocations(excludeOfflinedSplitParents);
-      List<HRegionLocation> list = new ArrayList<>();
-      GetAllMetaRegionLocationsResponse.Builder builder =
-        GetAllMetaRegionLocationsResponse.newBuilder();
-      if (locs != null) {
-        for (RegionLocations ls : locs) {
-          for (HRegionLocation loc : ls) {
-            if (loc != null) {
-              builder.addMetaLocations(ProtobufUtil.toRegionLocation(loc));
-              list.add(loc);
+      MetaLocationCache cache = master.getMetaLocationCache();
+      List<HRegionLocation> list;
+      if (cache != null) {
+        list = cache.getAllMetaRegionLocations(excludeOfflinedSplitParents);
+      } else {
+        List<RegionLocations> locs = master.getAllMetaRegionLocations(excludeOfflinedSplitParents);
+        list = new ArrayList<>();
+        if (locs != null) {
+          for (RegionLocations ls : locs) {
+            for (HRegionLocation loc : ls) {
+              if (loc != null) {
+                list.add(loc);
+              }
             }
           }
         }
       }
+      GetAllMetaRegionLocationsResponse.Builder builder =
+        GetAllMetaRegionLocationsResponse.newBuilder();
+      for (HRegionLocation loc : list) {
+        builder.addMetaLocations(ProtobufUtil.toRegionLocation(loc));
+      }
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().postGetAllMetaRegionLocations(excludeOfflinedSplitParents,
           list);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaLocationCache.java
new file mode 100644
index 0000000..ecf3323
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaLocationCache.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.client.ConnectionUtils.locateRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.locateRowBefore;
+import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaCellComparator;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionLocateType;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A cache of meta region locations.
+ * <p/>
+ * A {@link ScheduledChore} periodically fetches the full list of meta region locations from the
+ * active master through the {@link AsyncClusterConnection}, so that this (backup) master can
+ * answer locate-meta requests from the cache. The cache is stopped (via {@link Stoppable}) once
+ * this master becomes the active one and no longer needs to sync from another master.
+ */
+@InterfaceAudience.Private
+class MetaLocationCache implements Stoppable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MetaLocationCache.class);
+
+  /** Config key: how often, in seconds, to sync meta locations from the active master. */
+  static final String SYNC_INTERVAL_SECONDS =
+    "hbase.master.meta-location-cache.sync-interval-seconds";
+
+  // default sync every 1 second.
+  static final int DEFAULT_SYNC_INTERVAL_SECONDS = 1;
+
+  /** Config key: timeout, in milliseconds, for a single fetch from the active master. */
+  private static final String FETCH_TIMEOUT_MS =
+    "hbase.master.meta-location-cache.fetch-timeout-ms";
+
+  // default timeout 1 second
+  private static final int DEFAULT_FETCH_TIMEOUT_MS = 1000;
+
+  /**
+   * An immutable snapshot of one fetch: the flat list of all locations, plus a navigable map
+   * keyed by region start key for row-based lookups. A complete snapshot is published at once
+   * through the volatile {@link #holder} field, so readers always see a consistent view.
+   */
+  private static final class CacheHolder {
+
+    final NavigableMap<byte[], RegionLocations> cache;
+
+    final List<HRegionLocation> all;
+
+    CacheHolder(List<HRegionLocation> all) {
+      this.all = Collections.unmodifiableList(all);
+      // group locations by region start key, skipping split parents; locations sharing a start
+      // key (replicas) are ordered by RegionInfo's natural ordering
+      NavigableMap<byte[], SortedSet<HRegionLocation>> startKeyToLocs =
+        new TreeMap<>(MetaCellComparator.ROW_COMPARATOR);
+      for (HRegionLocation loc : all) {
+        if (loc.getRegion().isSplitParent()) {
+          continue;
+        }
+        startKeyToLocs.computeIfAbsent(loc.getRegion().getStartKey(),
+          k -> new TreeSet<>((l1, l2) -> l1.getRegion().compareTo(l2.getRegion()))).add(loc);
+      }
+      // the merge function should never fire: keys are already unique in startKeyToLocs
+      this.cache = startKeyToLocs.entrySet().stream().collect(Collectors.collectingAndThen(
+        Collectors.toMap(Map.Entry::getKey, e -> new RegionLocations(e.getValue()), (u, v) -> {
+          throw new IllegalStateException();
+        }, () -> new TreeMap<>(MetaCellComparator.ROW_COMPARATOR)),
+        Collections::unmodifiableNavigableMap));
+    }
+  }
+
+  // latest snapshot fetched from the active master; null until the first successful fetch
+  private volatile CacheHolder holder;
+
+  private volatile boolean stopped = false;
+
+  MetaLocationCache(MasterServices master) {
+    int syncIntervalSeconds =
+      master.getConfiguration().getInt(SYNC_INTERVAL_SECONDS, DEFAULT_SYNC_INTERVAL_SECONDS);
+    int fetchTimeoutMs =
+      master.getConfiguration().getInt(FETCH_TIMEOUT_MS, DEFAULT_FETCH_TIMEOUT_MS);
+    // this cache is passed to the chore as its Stoppable, so calling stop() on the cache
+    // should also end the chore's scheduling
+    master.getChoreService().scheduleChore(new ScheduledChore(
+      getClass().getSimpleName() + "-Sync-Chore", this, syncIntervalSeconds, 0, TimeUnit.SECONDS) {
+
+      @Override
+      protected void chore() {
+        // the cluster connection is set up asynchronously at startup, so it may not be ready yet
+        AsyncClusterConnection conn = master.getAsyncClusterConnection();
+        if (conn != null) {
+          addListener(conn.getAllMetaRegionLocations(fetchTimeoutMs), (locs, error) -> {
+            if (error != null) {
+              // keep the previous snapshot on failure; the next run of the chore will retry
+              LOG.warn("Failed to fetch all meta region locations from active master", error);
+              return;
+            }
+            holder = new CacheHolder(locs);
+          });
+        }
+      }
+    });
+  }
+
+  /**
+   * Locate the meta region for the given row from the cached snapshot.
+   * @return the matching locations, or {@code null} if nothing has been cached yet
+   */
+  RegionLocations locateMeta(byte[] row, RegionLocateType locateType) {
+    if (locateType == RegionLocateType.AFTER) {
+      // as we know the exact row after us, so we can just create the new row, and use the same
+      // algorithm to locate it.
+      row = Arrays.copyOf(row, row.length + 1);
+      locateType = RegionLocateType.CURRENT;
+    }
+    // read the volatile field once so both null check and lookup use the same snapshot
+    CacheHolder holder = this.holder;
+    if (holder == null) {
+      return null;
+    }
+    return locateType.equals(RegionLocateType.BEFORE) ?
+      locateRowBefore(holder.cache, TableName.META_TABLE_NAME, row, RegionInfo.DEFAULT_REPLICA_ID) :
+      locateRow(holder.cache, TableName.META_TABLE_NAME, row, RegionInfo.DEFAULT_REPLICA_ID);
+  }
+
+  /**
+   * @param excludeOfflinedSplitParents whether to filter out locations of split parent regions
+   * @return all cached meta region locations; an empty list if nothing has been cached yet
+   */
+  List<HRegionLocation> getAllMetaRegionLocations(boolean excludeOfflinedSplitParents) {
+    CacheHolder holder = this.holder;
+    if (holder == null) {
+      return Collections.emptyList();
+    }
+    if (!excludeOfflinedSplitParents) {
+      // just return all the locations
+      return holder.all;
+    } else {
+      return holder.all.stream().filter(l -> !l.getRegion().isSplitParent())
+        .collect(Collectors.toList());
+    }
+  }
+
+  @Override
+  public void stop(String why) {
+    LOG.info("Stopping meta location cache: {}", why);
+    this.stopped = true;
+  }
+
+  @Override
+  public boolean isStopped() {
+    return stopped;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
deleted file mode 100644
index b192a67..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ThreadFactory;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.types.CopyOnWriteArrayMap;
-import org.apache.hadoop.hbase.util.RetryCounter;
-import org.apache.hadoop.hbase.util.RetryCounterFactory;
-import org.apache.hadoop.hbase.zookeeper.ZKListener;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-
-/**
- * A cache of meta region location metadata. Registers a listener on ZK to track changes to the meta
- * table znodes. Clients are expected to retry if the meta information is stale. This class is
- * thread-safe (a single instance of this class can be shared by multiple threads without race
- * conditions).
- * @deprecated Now we store meta location in the local store at master side so we should get the
- *             meta location from active master instead of zk, keep it here only for compatibility.
- */
-@Deprecated
-@InterfaceAudience.Private
-public class MetaRegionLocationCache extends ZKListener {
-
-  private static final Logger LOG = LoggerFactory.getLogger(MetaRegionLocationCache.class);
-
-  /**
-   * Maximum number of times we retry when ZK operation times out.
-   */
-  private static final int MAX_ZK_META_FETCH_RETRIES = 10;
-  /**
-   * Sleep interval ms between ZK operation retries.
-   */
-  private static final int SLEEP_INTERVAL_MS_BETWEEN_RETRIES = 1000;
-  private static final int SLEEP_INTERVAL_MS_MAX = 10000;
-  private final RetryCounterFactory retryCounterFactory =
-      new RetryCounterFactory(MAX_ZK_META_FETCH_RETRIES, SLEEP_INTERVAL_MS_BETWEEN_RETRIES);
-
-  /**
-   * Cached meta region locations indexed by replica ID.
-   * CopyOnWriteArrayMap ensures synchronization during updates and a consistent snapshot during
-   * client requests. Even though CopyOnWriteArrayMap copies the data structure for every write,
-   * that should be OK since the size of the list is often small and mutations are not too often
-   * and we do not need to block client requests while mutations are in progress.
-   */
-  private final CopyOnWriteArrayMap<Integer, HRegionLocation> cachedMetaLocations;
-
-  private enum ZNodeOpType {
-    INIT,
-    CREATED,
-    CHANGED,
-    DELETED
-  }
-
-  public MetaRegionLocationCache(ZKWatcher zkWatcher) {
-    super(zkWatcher);
-    cachedMetaLocations = new CopyOnWriteArrayMap<>();
-    watcher.registerListener(this);
-    // Populate the initial snapshot of data from meta znodes.
-    // This is needed because stand-by masters can potentially start after the initial znode
-    // creation. It blocks forever until the initial meta locations are loaded from ZK and watchers
-    // are established. Subsequent updates are handled by the registered listener. Also, this runs
-    // in a separate thread in the background to not block master init.
-    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true).build();
-    RetryCounterFactory retryFactory = new RetryCounterFactory(
-        Integer.MAX_VALUE, SLEEP_INTERVAL_MS_BETWEEN_RETRIES, SLEEP_INTERVAL_MS_MAX);
-    threadFactory.newThread(
-      ()->loadMetaLocationsFromZk(retryFactory.create(), ZNodeOpType.INIT)).start();
-  }
-
-  /**
-   * Populates the current snapshot of meta locations from ZK. If no meta znodes exist, it registers
-   * a watcher on base znode to check for any CREATE/DELETE events on the children.
-   * @param retryCounter controls the number of retries and sleep between retries.
-   */
-  private void loadMetaLocationsFromZk(RetryCounter retryCounter, ZNodeOpType opType) {
-    List<String> znodes = null;
-    while (retryCounter.shouldRetry()) {
-      try {
-        znodes = watcher.getMetaReplicaNodesAndWatchChildren();
-        break;
-      } catch (KeeperException ke) {
-        LOG.debug("Error populating initial meta locations", ke);
-        if (!retryCounter.shouldRetry()) {
-          // Retries exhausted and watchers not set. This is not a desirable state since the cache
-          // could remain stale forever. Propagate the exception.
-          watcher.abort("Error populating meta locations", ke);
-          return;
-        }
-        try {
-          retryCounter.sleepUntilNextRetry();
-        } catch (InterruptedException ie) {
-          LOG.error("Interrupted while loading meta locations from ZK", ie);
-          Thread.currentThread().interrupt();
-          return;
-        }
-      }
-    }
-    if (znodes == null || znodes.isEmpty()) {
-      // No meta znodes exist at this point but we registered a watcher on the base znode to listen
-      // for updates. They will be handled via nodeChildrenChanged().
-      return;
-    }
-    if (znodes.size() == cachedMetaLocations.size()) {
-      // No new meta znodes got added.
-      return;
-    }
-    for (String znode: znodes) {
-      String path = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode);
-      updateMetaLocation(path, opType);
-    }
-  }
-
-  /**
-   * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for
-   * future updates.
-   * @param replicaId ReplicaID of the region.
-   * @return HRegionLocation for the meta replica.
-   * @throws KeeperException if there is any issue fetching/parsing the serialized data.
-   */
-  private HRegionLocation getMetaRegionLocation(int replicaId)
-      throws KeeperException {
-    RegionState metaRegionState;
-    try {
-      byte[] data = ZKUtil.getDataAndWatch(watcher,
-          watcher.getZNodePaths().getZNodeForReplica(replicaId));
-      metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId);
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    }
-    return new HRegionLocation(metaRegionState.getRegion(), metaRegionState.getServerName());
-  }
-
-  private void updateMetaLocation(String path, ZNodeOpType opType) {
-    if (!isValidMetaPath(path)) {
-      return;
-    }
-    LOG.debug("Updating meta znode for path {}: {}", path, opType.name());
-    int replicaId = watcher.getZNodePaths().getMetaReplicaIdFromPath(path);
-    RetryCounter retryCounter = retryCounterFactory.create();
-    HRegionLocation location = null;
-    while (retryCounter.shouldRetry()) {
-      try {
-        if (opType == ZNodeOpType.DELETED) {
-          if (!ZKUtil.watchAndCheckExists(watcher, path)) {
-            // The path does not exist, we've set the watcher and we can break for now.
-            break;
-          }
-          // If it is a transient error and the node appears right away, we fetch the
-          // latest meta state.
-        }
-        location = getMetaRegionLocation(replicaId);
-        break;
-      } catch (KeeperException e) {
-        LOG.debug("Error getting meta location for path {}", path, e);
-        if (!retryCounter.shouldRetry()) {
-          LOG.warn("Error getting meta location for path {}. Retries exhausted.", path, e);
-          break;
-        }
-        try {
-          retryCounter.sleepUntilNextRetry();
-        } catch (InterruptedException ie) {
-          Thread.currentThread().interrupt();
-          return;
-        }
-      }
-    }
-    if (location == null) {
-      cachedMetaLocations.remove(replicaId);
-      return;
-    }
-    cachedMetaLocations.put(replicaId, location);
-  }
-
-  /**
-   * @return Optional list of HRegionLocations for meta replica(s), null if the cache is empty.
-   *
-   */
-  public Optional<List<HRegionLocation>> getMetaRegionLocations() {
-    ConcurrentNavigableMap<Integer, HRegionLocation> snapshot =
-        cachedMetaLocations.tailMap(cachedMetaLocations.firstKey());
-    if (snapshot.isEmpty()) {
-      // This could be possible if the master has not successfully initialized yet or meta region
-      // is stuck in some weird state.
-      return Optional.empty();
-    }
-    List<HRegionLocation> result = new ArrayList<>();
-    // Explicitly iterate instead of new ArrayList<>(snapshot.values()) because the underlying
-    // ArrayValueCollection does not implement toArray().
-    snapshot.values().forEach(location -> result.add(location));
-    return Optional.of(result);
-  }
-
-  /**
-   * Helper to check if the given 'path' corresponds to a meta znode. This listener is only
-   * interested in changes to meta znodes.
-   */
-  private boolean isValidMetaPath(String path) {
-    return watcher.getZNodePaths().isMetaZNodePath(path);
-  }
-
-  @Override
-  public void nodeCreated(String path) {
-    updateMetaLocation(path, ZNodeOpType.CREATED);
-  }
-
-  @Override
-  public void nodeDeleted(String path) {
-    updateMetaLocation(path, ZNodeOpType.DELETED);
-  }
-
-  @Override
-  public void nodeDataChanged(String path) {
-    updateMetaLocation(path, ZNodeOpType.CHANGED);
-  }
-
-  @Override
-  public void nodeChildrenChanged(String path) {
-    if (!path.equals(watcher.getZNodePaths().baseZNode)) {
-      return;
-    }
-    loadMetaLocationsFromZk(retryCounterFactory.create(), ZNodeOpType.CHANGED);
-  }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index bd0b423..a0731da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -288,7 +288,7 @@ public class HRegionServer extends Thread implements
   /**
    * The asynchronous cluster connection to be shared by services.
    */
-  protected AsyncClusterConnection asyncClusterConnection;
+  protected volatile AsyncClusterConnection asyncClusterConnection;
 
   /**
    * Go here to get table descriptors.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
index 8755749..4385a5a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
@@ -22,6 +22,7 @@ import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -157,4 +158,9 @@ public class DummyAsyncClusterConnection implements AsyncClusterConnection {
   public Connection toConnection() {
     return null;
   }
+
+  @Override
+  public CompletableFuture<List<HRegionLocation>> getAllMetaRegionLocations(int callTimeoutMs) {
+    // dummy implementation; tests that exercise this method are expected to override it
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java
index ea1122c..72fc705 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java
@@ -17,8 +17,11 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 
 /**
@@ -28,7 +31,7 @@ import org.apache.hadoop.hbase.ServerName;
 public class DummyConnectionRegistry implements ConnectionRegistry {
 
   public static final String REGISTRY_IMPL_CONF_KEY =
-      HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY;
+    HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY;
 
   @Override
   public CompletableFuture<String> getClusterId() {
@@ -43,4 +46,15 @@ public class DummyConnectionRegistry implements ConnectionRegistry {
   @Override
   public void close() {
   }
+
+  @Override
+  public CompletableFuture<RegionLocations> locateMeta(byte[] row, RegionLocateType type) {
+    // dummy implementation; tests that exercise this method are expected to override it
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<List<HRegionLocation>>
+    getAllMetaRegionLocations(boolean excludeOfflinedSplitParents) {
+    // dummy implementation; tests that exercise this method are expected to override it
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
index 1044d2d..09a0ab0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
@@ -32,7 +31,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
 import org.apache.hadoop.hbase.TableName;
@@ -108,31 +106,6 @@ public class TestMasterRegistry {
     }
   }
 
-  @Test
-  public void testRegistryRPCs() throws Exception {
-    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
-    HMaster activeMaster = TEST_UTIL.getHBaseCluster().getMaster();
-    final int size =
-      activeMaster.getMetaRegionLocationCache().getMetaRegionLocations().get().size();
-    for (int numHedgedReqs = 1; numHedgedReqs <= size; numHedgedReqs++) {
-      conf.setInt(MasterRegistry.MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, numHedgedReqs);
-      try (MasterRegistry registry = new MasterRegistry(conf)) {
-        // Add wait on all replicas being assigned before proceeding w/ test. Failed on occasion
-        // because not all replicas had made it up before test started.
-        RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
-        assertEquals(registry.getClusterId().get(), activeMaster.getClusterId());
-        assertEquals(registry.getActiveMaster().get(), activeMaster.getServerName());
-        List<HRegionLocation> metaLocations =
-          Arrays.asList(registry.getMetaRegionLocations().get().getRegionLocations());
-        List<HRegionLocation> actualMetaLocations =
-          activeMaster.getMetaRegionLocationCache().getMetaRegionLocations().get();
-        Collections.sort(metaLocations);
-        Collections.sort(actualMetaLocations);
-        assertEquals(actualMetaLocations, metaLocations);
-      }
-    }
-  }
-
   /**
    * Tests that the list of masters configured in the MasterRegistry is dynamically refreshed in the
    * event of errors.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
deleted file mode 100644
index d79f8ca..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MultithreadedTestUtil;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.master.MetaRegionLocationCache;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
-
-@Category({ SmallTests.class, MasterTests.class })
-public class TestMetaRegionLocationCache {
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestMetaRegionLocationCache.class);
-
-  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
-  private static ConnectionRegistry REGISTRY;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    TEST_UTIL.startMiniCluster(3);
-    HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
-    REGISTRY = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
-    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
-    TEST_UTIL.getAdmin().balancerSwitch(false, true);
-  }
-
-  @AfterClass
-  public static void cleanUp() throws Exception {
-    Closeables.close(REGISTRY, true);
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  private List<HRegionLocation> getCurrentMetaLocations(ZKWatcher zk) throws Exception {
-    List<HRegionLocation> result = new ArrayList<>();
-    for (String znode : zk.getMetaReplicaNodes()) {
-      String path = ZNodePaths.joinZNode(zk.getZNodePaths().baseZNode, znode);
-      int replicaId = zk.getZNodePaths().getMetaReplicaIdFromPath(path);
-      RegionState state = MetaTableLocator.getMetaRegionState(zk, replicaId);
-      result.add(new HRegionLocation(state.getRegion(), state.getServerName()));
-    }
-    return result;
-  }
-
-  // Verifies that the cached meta locations in the given master are in sync with what is in ZK.
-  private void verifyCachedMetaLocations(HMaster master) throws Exception {
-    // Wait until initial meta locations are loaded.
-    int retries = 0;
-    while (!master.getMetaRegionLocationCache().getMetaRegionLocations().isPresent()) {
-      Thread.sleep(1000);
-      if (++retries == 10) {
-        break;
-      }
-    }
-    List<HRegionLocation> metaHRLs =
-      master.getMetaRegionLocationCache().getMetaRegionLocations().get();
-    assertFalse(metaHRLs.isEmpty());
-    ZKWatcher zk = master.getZooKeeper();
-    List<String> metaZnodes = zk.getMetaReplicaNodes();
-    // Wait till all replicas available.
-    retries = 0;
-    while (master.getMetaRegionLocationCache().getMetaRegionLocations().get().size() !=
-        metaZnodes.size()) {
-      Thread.sleep(1000);
-      if (++retries == 10) {
-        break;
-      }
-    }
-    assertEquals(metaZnodes.size(), metaHRLs.size());
-    List<HRegionLocation> actualHRLs = getCurrentMetaLocations(zk);
-    Collections.sort(metaHRLs);
-    Collections.sort(actualHRLs);
-    assertEquals(actualHRLs, metaHRLs);
-  }
-
-  @Test
-  public void testInitialMetaLocations() throws Exception {
-    verifyCachedMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster());
-  }
-
-  @Test
-  public void testStandByMetaLocations() throws Exception {
-    HMaster standBy = TEST_UTIL.getMiniHBaseCluster().startMaster().getMaster();
-    standBy.isInitialized();
-    verifyCachedMetaLocations(standBy);
-  }
-
-  /*
-   * Shuffles the meta region replicas around the cluster and makes sure the cache is not stale.
-   */
-  @Test
-  public void testMetaLocationsChange() throws Exception {
-    List<HRegionLocation> currentMetaLocs =
-      getCurrentMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper());
-    // Move these replicas to random servers.
-    for (HRegionLocation location : currentMetaLocs) {
-      RegionReplicaTestHelper.moveRegion(TEST_UTIL, location);
-    }
-    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
-    for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster()
-      .getMasterThreads()) {
-      verifyCachedMetaLocations(masterThread.getMaster());
-    }
-  }
-
-  /**
-   * Tests MetaRegionLocationCache's init procedure to make sure that it correctly watches the base
-   * znode for notifications.
-   */
-  @Test
-  public void testMetaRegionLocationCache() throws Exception {
-    final String parentZnodeName = "/randomznodename";
-    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
-    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parentZnodeName);
-    ServerName sn = ServerName.valueOf("localhost", 1234, 5678);
-    try (ZKWatcher zkWatcher = new ZKWatcher(conf, null, null, true)) {
-      // A thread that repeatedly creates and drops an unrelated child znode. This is to simulate
-      // some ZK activity in the background.
-      MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
-      ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
-        @Override
-        public void doAnAction() throws Exception {
-          final String testZnode = parentZnodeName + "/child";
-          ZKUtil.createNodeIfNotExistsAndWatch(zkWatcher, testZnode, testZnode.getBytes());
-          ZKUtil.deleteNode(zkWatcher, testZnode);
-        }
-      });
-      ctx.startThreads();
-      try {
-        MetaRegionLocationCache metaCache = new MetaRegionLocationCache(zkWatcher);
-        // meta znodes do not exist at this point, cache should be empty.
-        assertFalse(metaCache.getMetaRegionLocations().isPresent());
-        // Set the meta locations for a random meta replicas, simulating an active hmaster meta
-        // assignment.
-        for (int i = 0; i < 3; i++) {
-          // Updates the meta znodes.
-          MetaTableLocator.setMetaLocation(zkWatcher, sn, i, RegionState.State.OPEN);
-        }
-        // Wait until the meta cache is populated.
-        int iters = 0;
-        while (iters++ < 10) {
-          if (metaCache.getMetaRegionLocations().isPresent() &&
-            metaCache.getMetaRegionLocations().get().size() == 3) {
-            break;
-          }
-          Thread.sleep(1000);
-        }
-        List<HRegionLocation> metaLocations = metaCache.getMetaRegionLocations().get();
-        assertEquals(3, metaLocations.size());
-        for (HRegionLocation location : metaLocations) {
-          assertEquals(sn, location.getServerName());
-        }
-      } finally {
-        // clean up.
-        ctx.stop();
-        ZKUtil.deleteChildrenRecursively(zkWatcher, parentZnodeName);
-      }
-    }
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
index c00dd39..a47c742 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
@@ -113,7 +113,7 @@ public class TestZKConnectionRegistry {
   }
 
   @Test
-  public void testNoMetaAvailable() throws InterruptedException {
+  public void testNoMetaAvailable() throws InterruptedException, IOException {
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
     conf.set("zookeeper.znode.metaserver", "whatever");
     try (ZKConnectionRegistry registry = new ZKConnectionRegistry(conf)) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
index d17ecf8..a97c9a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java
@@ -21,8 +21,10 @@ package org.apache.hadoop.hbase.master;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
 import static org.junit.Assert.assertEquals;
+
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -30,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
@@ -40,26 +43,35 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetMetaRegionLocationsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionLocateType;
 
-@Category({MediumTests.class, MasterTests.class})
+@Category({ MediumTests.class, MasterTests.class })
 public class TestClientMetaServiceRPCs {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestClientMetaServiceRPCs.class);
+    HBaseClassTestRule.forClass(TestClientMetaServiceRPCs.class);
 
   // Total number of masters (active + stand by) for the purpose of this test.
   private static final int MASTER_COUNT = 3;
@@ -75,10 +87,21 @@ public class TestClientMetaServiceRPCs {
     builder.numMasters(MASTER_COUNT).numRegionServers(3);
     TEST_UTIL.startMiniCluster(builder.build());
     conf = TEST_UTIL.getConfiguration();
-    rpcTimeout = (int) Math.min(Integer.MAX_VALUE, TimeUnit.MILLISECONDS.toNanos(
-        conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)));
+    rpcTimeout = (int) Math.min(Integer.MAX_VALUE, TimeUnit.MILLISECONDS
+      .toNanos(conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)));
     rpcClient = RpcClientFactory.createClient(conf,
-        TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId());
+      TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId());
+    // make sure all masters have cluster connection set up
+    TEST_UTIL.waitFor(30000, () -> {
+      for (MasterThread mt : TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+        if (mt.getMaster().getAsyncClusterConnection() == null) {
+          return false;
+        }
+      }
+      return true;
+    });
+    Thread.sleep(2 * conf.getInt(MetaLocationCache.SYNC_INTERVAL_SECONDS,
+      MetaLocationCache.DEFAULT_SYNC_INTERVAL_SECONDS) * 1000);
   }
 
   @AfterClass
@@ -90,9 +113,9 @@ public class TestClientMetaServiceRPCs {
   }
 
   private static ClientMetaService.BlockingInterface getMasterStub(ServerName server)
-      throws IOException {
-    return ClientMetaService.newBlockingStub(
-        rpcClient.createBlockingRpcChannel(server, User.getCurrent(), rpcTimeout));
+    throws IOException {
+    return ClientMetaService
+      .newBlockingStub(rpcClient.createBlockingRpcChannel(server, User.getCurrent(), rpcTimeout));
   }
 
   private static HBaseRpcController getRpcController() {
@@ -102,16 +125,17 @@ public class TestClientMetaServiceRPCs {
   /**
    * Verifies the cluster ID from all running masters.
    */
-  @Test public void TestClusterID() throws Exception {
+  @Test
+  public void TestClusterID() throws Exception {
     HBaseRpcController rpcController = getRpcController();
     String clusterID = TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId();
     int rpcCount = 0;
-    for (JVMClusterUtil.MasterThread masterThread:
-        TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+    for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster()
+      .getMasterThreads()) {
       ClientMetaService.BlockingInterface stub =
-          getMasterStub(masterThread.getMaster().getServerName());
+        getMasterStub(masterThread.getMaster().getServerName());
       GetClusterIdResponse resp =
-          stub.getClusterId(rpcController, GetClusterIdRequest.getDefaultInstance());
+        stub.getClusterId(rpcController, GetClusterIdRequest.getDefaultInstance());
       assertEquals(clusterID, resp.getClusterId());
       rpcCount++;
     }
@@ -121,40 +145,93 @@ public class TestClientMetaServiceRPCs {
   /**
    * Verifies the active master ServerName as seen by all masters.
    */
-  @Test public void TestActiveMaster() throws Exception {
+  @Test
+  public void TestActiveMaster() throws Exception {
     HBaseRpcController rpcController = getRpcController();
     ServerName activeMaster = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName();
     int rpcCount = 0;
-    for (JVMClusterUtil.MasterThread masterThread:
-        TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+    for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster()
+      .getMasterThreads()) {
       ClientMetaService.BlockingInterface stub =
-          getMasterStub(masterThread.getMaster().getServerName());
+        getMasterStub(masterThread.getMaster().getServerName());
       GetActiveMasterResponse resp =
-          stub.getActiveMaster(rpcController, GetActiveMasterRequest.getDefaultInstance());
+        stub.getActiveMaster(rpcController, GetActiveMasterRequest.getDefaultInstance());
       assertEquals(activeMaster, ProtobufUtil.toServerName(resp.getServerName()));
       rpcCount++;
     }
     assertEquals(MASTER_COUNT, rpcCount);
   }
 
+  private List<HRegionLocation> getMetaLocations() throws IOException {
+    List<HRegionLocation> metaLocations = new ArrayList<>();
+    for (RegionLocations locs : TEST_UTIL.getMiniHBaseCluster().getMaster()
+      .getAllMetaRegionLocations(true)) {
+      metaLocations.addAll(Arrays.asList(locs.getRegionLocations()));
+    }
+    Collections.sort(metaLocations);
+    return metaLocations;
+  }
+
   /**
    * Verifies that the meta region locations RPC returns consistent results across all masters.
    */
-  @Test public void TestMetaLocations() throws Exception {
+  @Test
+  public void TestMetaLocations() throws Exception {
     HBaseRpcController rpcController = getRpcController();
-    List<HRegionLocation> metaLocations = TEST_UTIL.getMiniHBaseCluster().getMaster()
-        .getMetaRegionLocationCache().getMetaRegionLocations().get();
-    Collections.sort(metaLocations);
+    List<HRegionLocation> metaLocations = getMetaLocations();
+    int rpcCount = 0;
+    for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster()
+      .getMasterThreads()) {
+      ClientMetaService.BlockingInterface stub =
+        getMasterStub(masterThread.getMaster().getServerName());
+      GetMetaRegionLocationsResponse resp = stub.getMetaRegionLocations(rpcController,
+        GetMetaRegionLocationsRequest.getDefaultInstance());
+      List<HRegionLocation> result = new ArrayList<>();
+      resp.getMetaLocationsList()
+        .forEach(location -> result.add(ProtobufUtil.toRegionLocation(location)));
+      Collections.sort(result);
+      assertEquals(metaLocations, result);
+      rpcCount++;
+    }
+    assertEquals(MASTER_COUNT, rpcCount);
+  }
+
+  @Test
+  public void testLocateMeta() throws Exception {
+    HBaseRpcController rpcController = getRpcController();
+    List<HRegionLocation> metaLocations = getMetaLocations();
+    int rpcCount = 0;
+    for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster()
+      .getMasterThreads()) {
+      ClientMetaService.BlockingInterface stub =
+        getMasterStub(masterThread.getMaster().getServerName());
+      LocateMetaRegionResponse resp = stub.locateMetaRegion(rpcController,
+        LocateMetaRegionRequest.newBuilder().setRow(ByteString.EMPTY)
+          .setLocateType(RegionLocateType.REGION_LOCATE_TYPE_CURRENT).build());
+      List<HRegionLocation> result = new ArrayList<>();
+      resp.getMetaLocationsList()
+        .forEach(location -> result.add(ProtobufUtil.toRegionLocation(location)));
+      Collections.sort(result);
+      assertEquals(metaLocations, result);
+      rpcCount++;
+    }
+    assertEquals(MASTER_COUNT, rpcCount);
+  }
+
+  @Test
+  public void testGetAllMetaLocations() throws Exception {
+    HBaseRpcController rpcController = getRpcController();
+    List<HRegionLocation> metaLocations = getMetaLocations();
     int rpcCount = 0;
-    for (JVMClusterUtil.MasterThread masterThread:
-      TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+    for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster()
+      .getMasterThreads()) {
       ClientMetaService.BlockingInterface stub =
-          getMasterStub(masterThread.getMaster().getServerName());
-      GetMetaRegionLocationsResponse resp = stub.getMetaRegionLocations(
-          rpcController, GetMetaRegionLocationsRequest.getDefaultInstance());
+        getMasterStub(masterThread.getMaster().getServerName());
+      GetAllMetaRegionLocationsResponse resp = stub.getAllMetaRegionLocations(rpcController,
+        GetAllMetaRegionLocationsRequest.newBuilder().setExcludeOfflinedSplitParents(true).build());
       List<HRegionLocation> result = new ArrayList<>();
-      resp.getMetaLocationsList().forEach(
-        location -> result.add(ProtobufUtil.toRegionLocation(location)));
+      resp.getMetaLocationsList()
+        .forEach(location -> result.add(ProtobufUtil.toRegionLocation(location)));
       Collections.sort(result);
       assertEquals(metaLocations, result);
       rpcCount++;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaLocationCache.java
new file mode 100644
index 0000000..306767e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaLocationCache.java
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocateType;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FutureUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestMetaLocationCache {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestMetaLocationCache.class);
+
+  private static Configuration CONF = HBaseConfiguration.create();
+
+  private static ChoreService CHORE_SERVICE;
+
+  private static byte[] SPLIT = Bytes.toBytes("a");
+
+  private MasterServices master;
+
+  private MetaLocationCache cache;
+
+  @BeforeClass
+  public static void setUpBeforeClass() {
+    CONF.setInt(MetaLocationCache.SYNC_INTERVAL_SECONDS, 1);
+    CHORE_SERVICE = new ChoreService("TestMetaLocationCache");
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() {
+    CHORE_SERVICE.shutdown();
+  }
+
+  @Before
+  public void setUp() {
+    master = mock(MasterServices.class);
+    when(master.getConfiguration()).thenReturn(CONF);
+    when(master.getChoreService()).thenReturn(CHORE_SERVICE);
+    cache = new MetaLocationCache(master);
+  }
+
+  @After
+  public void tearDown() {
+    if (cache != null) {
+      cache.stop("test end");
+    }
+  }
+
+  @Test
+  public void testError() throws InterruptedException {
+    AsyncClusterConnection conn = mock(AsyncClusterConnection.class);
+    when(conn.getAllMetaRegionLocations(anyInt()))
+      .thenReturn(FutureUtils.failedFuture(new RuntimeException("inject error")));
+    when(master.getAsyncClusterConnection()).thenReturn(conn);
+    Thread.sleep(2000);
+    assertNull(cache.locateMeta(HConstants.EMPTY_BYTE_ARRAY, RegionLocateType.CURRENT));
+    assertTrue(cache.getAllMetaRegionLocations(true).isEmpty());
+
+    HRegionLocation loc =
+      new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(),
+        ServerName.valueOf("localhost", 12345, System.currentTimeMillis()));
+    when(conn.getAllMetaRegionLocations(anyInt()))
+      .thenReturn(CompletableFuture.completedFuture(Arrays.asList(loc)));
+    Thread.sleep(2000);
+    List<HRegionLocation> list = cache.getAllMetaRegionLocations(false);
+    assertEquals(1, list.size());
+    assertEquals(loc, list.get(0));
+  }
+
+  private void prepareData() throws InterruptedException {
+    AsyncClusterConnection conn = mock(AsyncClusterConnection.class);
+    RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setSplit(true)
+      .setOffline(true).build();
+    RegionInfo daughter1 =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(SPLIT).build();
+    RegionInfo daughter2 =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(SPLIT).build();
+    HRegionLocation parentLoc = new HRegionLocation(parent,
+      ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis()));
+    HRegionLocation daughter1Loc = new HRegionLocation(daughter1,
+      ServerName.valueOf("127.0.0.2", 12345, System.currentTimeMillis()));
+    HRegionLocation daughter2Loc = new HRegionLocation(daughter2,
+      ServerName.valueOf("127.0.0.3", 12345, System.currentTimeMillis()));
+    when(conn.getAllMetaRegionLocations(anyInt())).thenReturn(
+      CompletableFuture.completedFuture(Arrays.asList(parentLoc, daughter1Loc, daughter2Loc)));
+    when(master.getAsyncClusterConnection()).thenReturn(conn);
+    Thread.sleep(2000);
+  }
+
+  @Test
+  public void testLocateMeta() throws InterruptedException {
+    prepareData();
+    RegionLocations locs = cache.locateMeta(SPLIT, RegionLocateType.BEFORE);
+    assertEquals(1, locs.size());
+    HRegionLocation loc = locs.getDefaultRegionLocation();
+    assertArrayEquals(SPLIT, loc.getRegion().getEndKey());
+
+    locs = cache.locateMeta(SPLIT, RegionLocateType.CURRENT);
+    assertEquals(1, locs.size());
+    loc = locs.getDefaultRegionLocation();
+    assertArrayEquals(SPLIT, loc.getRegion().getStartKey());
+
+    locs = cache.locateMeta(SPLIT, RegionLocateType.AFTER);
+    assertEquals(1, locs.size());
+    loc = locs.getDefaultRegionLocation();
+    assertArrayEquals(SPLIT, loc.getRegion().getStartKey());
+  }
+
+  @Test
+  public void testGetAllMetaRegionLocations() throws InterruptedException {
+    prepareData();
+    List<HRegionLocation> locs = cache.getAllMetaRegionLocations(false);
+    assertEquals(3, locs.size());
+    HRegionLocation loc = locs.get(0);
+    assertTrue(loc.getRegion().isSplitParent());
+    loc = locs.get(1);
+    assertArrayEquals(SPLIT, loc.getRegion().getEndKey());
+    loc = locs.get(2);
+    assertArrayEquals(SPLIT, loc.getRegion().getStartKey());
+
+    locs = cache.getAllMetaRegionLocations(true);
+    assertEquals(2, locs.size());
+    loc = locs.get(0);
+    assertArrayEquals(SPLIT, loc.getRegion().getEndKey());
+    loc = locs.get(1);
+    assertArrayEquals(SPLIT, loc.getRegion().getStartKey());
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
index 0cc510f..ad8ad7a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
@@ -122,7 +122,6 @@ public class TestRegionAssignedToMultipleRegionServers {
 
     @Override
     protected ServerManager createServerManager(MasterServices master) throws IOException {
-      setupClusterConnection();
       return new ServerManagerForTest(master);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
index 7fbf28b..a271f17 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
@@ -129,7 +129,6 @@ public class TestReportRegionStateTransitionFromDeadServer {
 
     @Override
     protected ServerManager createServerManager(MasterServices master) throws IOException {
-      setupClusterConnection();
       return new ServerManagerForTest(master);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
index c4ad67c..eeeeda6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
@@ -142,7 +142,6 @@ public class TestSCPGetRegionsRace {
 
     @Override
     protected ServerManager createServerManager(MasterServices master) throws IOException {
-      setupClusterConnection();
       return new ServerManagerForTest(master);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
index ee01223..0bc97fa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
@@ -210,7 +210,6 @@ public class TestWakeUpUnexpectedProcedure {
 
     @Override
     protected ServerManager createServerManager(MasterServices master) throws IOException {
-      setupClusterConnection();
       return new SMForTest(master);
     }
   }

[hbase] 05/09: HBASE-24391 Implement meta split (#2010)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 981d9d01cfb3f6e868d683cb3aef27a2f6e7a4ad
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Mon Sep 7 10:59:43 2020 +0800

    HBASE-24391 Implement meta split (#2010)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../AsyncScanSingleRegionRpcRetryingCaller.java    | 47 +++++++++--
 .../hadoop/hbase/client/ConnectionUtils.java       | 26 ------
 .../hbase/client/TableRegionLocationCache.java     |  2 +-
 .../hbase/master/assignment/AssignmentManager.java |  3 +-
 .../hbase/master/assignment/RegionStateStore.java  | 52 +++++++-----
 .../master/procedure/ModifyTableProcedure.java     | 16 +++-
 .../DelimitedKeyPrefixRegionSplitPolicy.java       |  2 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  5 --
 .../apache/hadoop/hbase/regionserver/HStore.java   |  2 -
 .../hbase/regionserver/MetaRegionSplitPolicy.java  | 40 +++++++++
 .../hbase/regionserver/RegionSplitPolicy.java      |  3 +-
 .../hadoop/hbase/util/FSTableDescriptors.java      |  8 +-
 .../apache/hadoop/hbase/TestMetaSplitBySize.java   | 62 ++++++++++++++
 .../hbase/TestMetaUpdatesGoToPriorityQueue.java    |  2 +-
 .../apache/hadoop/hbase/TestSimpleMetaSplit.java   | 98 ++++++++++++++++++++++
 .../client/TestShutdownOfMetaReplicaHolder.java    | 22 ++++-
 .../master/assignment/TestRegionStateStore.java    | 28 +++----
 17 files changed, 323 insertions(+), 95 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index 1fa3c81..3c41df1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -21,8 +21,8 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.incRPCCallsMetrics;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.incRPCRetriesMetrics;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForReverseScan;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForScan;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.updateResultsMetrics;
@@ -30,6 +30,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.updateServerSideMet
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
@@ -37,6 +38,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.CallQueueTooBigException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaCellComparator;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer.ScanResumer;
@@ -45,6 +47,7 @@ import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
 import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -114,6 +117,8 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 
   private final HBaseRpcController controller;
 
+  private final Comparator<byte[]> comparator;
+
   private byte[] nextStartRowWhenError;
 
   private boolean includeNextStartRowWhenError;
@@ -304,11 +309,11 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   }
 
   public AsyncScanSingleRegionRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl conn,
-      Scan scan, ScanMetrics scanMetrics, long scannerId, ScanResultCache resultCache,
-      AdvancedScanResultConsumer consumer, Interface stub, HRegionLocation loc,
-      boolean isRegionServerRemote, int priority, long scannerLeaseTimeoutPeriodNs, long pauseNs,
-      long pauseForCQTBENs, int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs,
-      int startLogErrorsCnt) {
+    Scan scan, ScanMetrics scanMetrics, long scannerId, ScanResultCache resultCache,
+    AdvancedScanResultConsumer consumer, Interface stub, HRegionLocation loc,
+    boolean isRegionServerRemote, int priority, long scannerLeaseTimeoutPeriodNs, long pauseNs,
+    long pauseForCQTBENs, int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs,
+    int startLogErrorsCnt) {
     this.retryTimer = retryTimer;
     this.scan = scan;
     this.scanMetrics = scanMetrics;
@@ -335,6 +340,8 @@ class AsyncScanSingleRegionRpcRetryingCaller {
     this.controller = conn.rpcControllerFactory.newController();
     this.controller.setPriority(priority);
     this.exceptions = new ArrayList<>();
+    this.comparator =
+      loc.getRegion().isMetaRegion() ? MetaCellComparator.ROW_COMPARATOR : Bytes.BYTES_COMPARATOR;
   }
 
   private long elapsedMs() {
@@ -442,6 +449,32 @@ class AsyncScanSingleRegionRpcRetryingCaller {
     includeNextStartRowWhenError = result.mayHaveMoreCellsInRow();
   }
 
+  private boolean noMoreResultsForScan(Scan scan, RegionInfo info) {
+    if (isEmptyStopRow(info.getEndKey())) {
+      return true;
+    }
+    if (isEmptyStopRow(scan.getStopRow())) {
+      return false;
+    }
+    int c = comparator.compare(info.getEndKey(), scan.getStopRow());
+    // 1. if our stop row is less than the endKey of the region
+    // 2. if our stop row is equal to the endKey of the region and we do not include the stop row
+    // for scan.
+    return c > 0 || (c == 0 && !scan.includeStopRow());
+  }
+
+  private boolean noMoreResultsForReverseScan(Scan scan, RegionInfo info) {
+    if (isEmptyStartRow(info.getStartKey())) {
+      return true;
+    }
+    if (isEmptyStopRow(scan.getStopRow())) {
+      return false;
+    }
+    // no need to test the inclusive of the stop row as the start key of a region is included in
+    // the region.
+    return comparator.compare(info.getStartKey(), scan.getStopRow()) <= 0;
+  }
+
   private void completeWhenNoMoreResultsInRegion() {
     if (noMoreResultsForScan(scan, loc.getRegion())) {
       completeNoMoreResults();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 1433c55..4697153 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -280,32 +280,6 @@ public final class ConnectionUtils {
     }
   }
 
-  static boolean noMoreResultsForScan(Scan scan, RegionInfo info) {
-    if (isEmptyStopRow(info.getEndKey())) {
-      return true;
-    }
-    if (isEmptyStopRow(scan.getStopRow())) {
-      return false;
-    }
-    int c = Bytes.compareTo(info.getEndKey(), scan.getStopRow());
-    // 1. if our stop row is less than the endKey of the region
-    // 2. if our stop row is equal to the endKey of the region and we do not include the stop row
-    // for scan.
-    return c > 0 || (c == 0 && !scan.includeStopRow());
-  }
-
-  static boolean noMoreResultsForReverseScan(Scan scan, RegionInfo info) {
-    if (isEmptyStartRow(info.getStartKey())) {
-      return true;
-    }
-    if (isEmptyStopRow(scan.getStopRow())) {
-      return false;
-    }
-    // no need to test the inclusive of the stop row as the start key of a region is included in
-    // the region.
-    return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
-  }
-
   static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
     return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
       .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java
index ed7cb90..4d0be07 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java
@@ -110,7 +110,7 @@ class TableRegionLocationCache {
         // the region is different, here we trust the one we fetched. This maybe wrong but finally
         // the upper layer can detect this and trigger removal of the wrong locations
         if (LOG.isDebugEnabled()) {
-          LOG.debug("The newnly fetch region {} is different from the old one {} for row '{}'," +
+          LOG.debug("The newly fetch region {} is different from the old one {} for row '{}'," +
             " try replaing the old one...", region, oldRegion, Bytes.toStringBinary(startKey));
         }
         if (cache.replace(startKey, oldLocs, locs)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 1786afe..a625fb3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1911,9 +1911,8 @@ public class AssignmentManager {
   // ============================================================================================
   // The above methods can only be called in TransitRegionStateProcedure(and related procedures)
   // ============================================================================================
-
   public void markRegionAsSplit(final RegionInfo parent, final ServerName serverName,
-      final RegionInfo daughterA, final RegionInfo daughterB) throws IOException {
+    final RegionInfo daughterA, final RegionInfo daughterB) throws IOException {
     // Update hbase:meta. Parent will be marked offline and split up in hbase:meta.
     // The parent stays in regionStates until cleared when removed by CatalogJanitor.
     // Update its state in regionStates to it shows as offline and split when read
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index cce7a81..c290c5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -271,30 +271,38 @@ public class RegionStateStore {
    */
   private void multiMutate(RegionInfo ri, List<Mutation> mutations) throws IOException {
     debugLogMutations(mutations);
-    byte[] row =
-      Bytes.toBytes(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionNameAsString() +
-        HConstants.DELIMITER);
-    MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
-    for (Mutation mutation : mutations) {
-      if (mutation instanceof Put) {
-        builder.addMutationRequest(
-          ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
-      } else if (mutation instanceof Delete) {
-        builder.addMutationRequest(
-          ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
-      } else {
-        throw new DoNotRetryIOException(
-          "multi in MetaEditor doesn't support " + mutation.getClass().getName());
+    if (ri.isMetaRegion()) {
+      masterRegion.update(region -> {
+        List<byte[]> rowsToLock =
+          mutations.stream().map(Mutation::getRow).collect(Collectors.toList());
+        region.mutateRowsWithLocks(mutations, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE);
+      });
+    } else {
+      byte[] row =
+        Bytes.toBytes(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionNameAsString() +
+          HConstants.DELIMITER);
+      MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
+      for (Mutation mutation : mutations) {
+        if (mutation instanceof Put) {
+          builder.addMutationRequest(
+            ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
+        } else if (mutation instanceof Delete) {
+          builder.addMutationRequest(
+            ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
+        } else {
+          throw new DoNotRetryIOException(
+            "multi in MetaEditor doesn't support " + mutation.getClass().getName());
+        }
       }
+      MutateRowsRequest request = builder.build();
+      AsyncTable<?> table =
+        master.getConnection().toAsyncConnection().getTable(TableName.META_TABLE_NAME);
+      CompletableFuture<MutateRowsResponse> future =
+        table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
+          MultiRowMutationService::newStub,
+          (stub, controller, done) -> stub.mutateRows(controller, request, done), row);
+      FutureUtils.get(future);
     }
-    MutateRowsRequest request = builder.build();
-    AsyncTable<?> table =
-      master.getConnection().toAsyncConnection().getTable(TableName.META_TABLE_NAME);
-    CompletableFuture<MutateRowsResponse> future =
-      table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
-        MultiRowMutationService::newStub,
-        (stub, controller, done) -> stub.mutateRows(controller, request, done), row);
-    FutureUtils.get(future);
   }
 
   private Table getMetaTable() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 247dd9c..23aa028 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.regionserver.MetaRegionSplitPolicy;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -101,17 +102,24 @@ public class ModifyTableProcedure
     if (this.modifiedTableDescriptor.isMetaTable()) {
       // If we are modifying the hbase:meta table, make sure we are not deleting critical
       // column families else we'll damage the cluster.
-      Set<byte []> cfs = this.modifiedTableDescriptor.getColumnFamilyNames();
+      Set<byte[]> cfs = this.modifiedTableDescriptor.getColumnFamilyNames();
       for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) {
         if (!cfs.contains(family)) {
-          throw new HBaseIOException("Delete of hbase:meta column family " +
-            Bytes.toString(family));
+          throw new HBaseIOException(
+            "Delete of hbase:meta column family " + Bytes.toString(family));
         }
       }
+      // also check if we want to change the split policy, which is not allowed
+      if (!MetaRegionSplitPolicy.class.getName()
+        .equals(this.modifiedTableDescriptor.getRegionSplitPolicyClassName())) {
+        throw new HBaseIOException("Can not change split policy for hbase:meta to " +
+          this.modifiedTableDescriptor.getRegionSplitPolicyClassName());
+      }
     }
   }
 
-  private void initialize(final TableDescriptor unmodifiedTableDescriptor,
+  private void initialize(
+    final TableDescriptor unmodifiedTableDescriptor,
       final boolean shouldCheckDescriptor) {
     this.unmodifiedTableDescriptor = unmodifiedTableDescriptor;
     this.shouldCheckDescriptor = shouldCheckDescriptor;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java
index 241c062..a1fd3f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java
@@ -49,7 +49,7 @@ public class DelimitedKeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundR
       .getLogger(DelimitedKeyPrefixRegionSplitPolicy.class);
   public static final String DELIMITER_KEY = "DelimitedKeyPrefixRegionSplitPolicy.delimiter";
 
-  private byte[] delimiter = null;
+  protected byte[] delimiter = null;
 
   @Override
   public String toString() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8c4660c..ef1e7ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7912,11 +7912,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * Return the split point. An empty result indicates the region isn't splittable.
    */
   public Optional<byte[]> checkSplit(boolean force) {
-    // Can't split META
-    if (this.getRegionInfo().isMetaRegion()) {
-      return Optional.empty();
-    }
-
     // Can't split a region that is closing.
     if (this.isClosing()) {
       return Optional.empty();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 2e1c862..11c3e5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -2009,8 +2009,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
   public Optional<byte[]> getSplitPoint() {
     this.lock.readLock().lock();
     try {
-      // Should already be enforced by the split policy!
-      assert !this.getRegionInfo().isMetaRegion();
       // Not split-able if we find a reference store file present in the store.
       if (hasReferences()) {
         LOG.trace("Not splittable; has references: {}", this);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetaRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetaRegionSplitPolicy.java
new file mode 100644
index 0000000..b0b13fa
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetaRegionSplitPolicy.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The split policy for meta.
+ * <p/>
+ * Now we just use {@link DelimitedKeyPrefixRegionSplitPolicy} with
+ * {@value org.apache.hadoop.hbase.HConstants#DELIMITER}, which means all the records for a table
+ * will be in the same region, so the multi-mutate operation when splitting/merging is still valid.
+ */
+@InterfaceAudience.Private
+public class MetaRegionSplitPolicy extends DelimitedKeyPrefixRegionSplitPolicy {
+
+  @Override
+  protected void configureForRegion(HRegion region) {
+    // TODO: it will issue an error of can not find the delimiter
+    super.configureForRegion(region);
+    delimiter = Bytes.toBytes(HConstants.DELIMITER);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
index 4a13030..3d94c37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
@@ -74,8 +74,7 @@ public abstract class RegionSplitPolicy extends Configured {
    * @return {@code true} if the specified region can be split.
    */
   protected boolean canSplit() {
-    return !region.getRegionInfo().isMetaRegion() && region.isAvailable() &&
-      region.getStores().stream().allMatch(HStore::canSplit);
+    return region.isAvailable() && region.getStores().stream().allMatch(HStore::canSplit);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 91de8b0..f64aaf5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.MetaRegionSplitPolicy;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -194,9 +195,10 @@ public class FSTableDescriptors implements TableDescriptors {
         .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
         .setBloomFilterType(BloomType.ROWCOL)
         .build())
-      .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(
-        MultiRowMutationEndpoint.class.getName())
-        .setPriority(Coprocessor.PRIORITY_SYSTEM).build());
+      .setCoprocessor(
+        CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName())
+          .setPriority(Coprocessor.PRIORITY_SYSTEM).build())
+      .setRegionSplitPolicyClassName(MetaRegionSplitPolicy.class.getName());
   }
 
   protected boolean isUsecache() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaSplitBySize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaSplitBySize.java
new file mode 100644
index 0000000..8775247
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaSplitBySize.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MiscTests.class, MediumTests.class })
+public class TestMetaSplitBySize {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestMetaSplitBySize.class);
+
+  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.getConfiguration().setLong("hbase.increasing.policy.initial.size", 1024);
+    UTIL.startMiniCluster(3);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void test() throws IOException {
+    byte[] family = Bytes.toBytes("family");
+    for (int i = 0; i < 10; i++) {
+      UTIL.createTable(TableName.valueOf("table_" + i), family);
+    }
+    HRegion region = UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).get(0);
+    region.requestFlush(FlushLifeCycleTracker.DUMMY);
+    UTIL.waitFor(30000, () -> UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).size() > 1);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java
index cd04157..8396e5a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java
@@ -132,4 +132,4 @@ public class TestMetaUpdatesGoToPriorityQueue {
 
     assertTrue(prevCalls < scheduler.numPriorityCalls);
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplit.java
new file mode 100644
index 0000000..6d50f71
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplit.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MiscTests.class, MediumTests.class })
+public class TestSimpleMetaSplit {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestSimpleMetaSplit.class);
+
+  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
+
+  private static byte[] CF = Bytes.toBytes("cf");
+
+  private static byte[] CQ = Bytes.toBytes("cq");
+
+  private static TableDescriptor TD1 = TableDescriptorBuilder.newBuilder(TableName.valueOf("a"))
+    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build();
+
+  private static TableDescriptor TD2 = TableDescriptorBuilder.newBuilder(TableName.valueOf("b"))
+    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build();
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.startMiniCluster(3);
+    UTIL.getAdmin().createTable(TD1);
+    UTIL.getAdmin().createTable(TD2);
+    UTIL.waitTableAvailable(TD1.getTableName());
+    UTIL.waitTableAvailable(TD2.getTableName());
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void test() throws IOException {
+    try (Table table = UTIL.getConnection().getTable(TD1.getTableName())) {
+      table.put(new Put(Bytes.toBytes("row1")).addColumn(CF, CQ, Bytes.toBytes("row1")));
+    }
+    try (Table table = UTIL.getConnection().getTable(TD2.getTableName())) {
+      table.put(new Put(Bytes.toBytes("row2")).addColumn(CF, CQ, Bytes.toBytes("row2")));
+    }
+    // split meta
+    UTIL.getAdmin().split(TableName.META_TABLE_NAME, Bytes.toBytes("b"));
+    // do not count it from client as it will reset the location cache for meta table
+    assertEquals(2, UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
+      .getRegionsOfTable(TableName.META_TABLE_NAME).size());
+    // clear the cache for table 'b'
+    try (RegionLocator locator = UTIL.getConnection().getRegionLocator(TD2.getTableName())) {
+      locator.clearRegionLocationCache();
+    }
+    // make sure that we could get the location of the TD2 from the second meta region
+    try (Table table = UTIL.getConnection().getTable(TD2.getTableName())) {
+      Result result = table.get(new Get(Bytes.toBytes("row2")));
+      assertEquals("row2", Bytes.toString(result.getValue(CF, CQ)));
+    }
+    // assert from client side
+    assertEquals(2, UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).size());
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java
index 11e5404..1a3d42b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
+import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -45,23 +47,35 @@ public class TestShutdownOfMetaReplicaHolder extends MetaWithReplicasTestBase {
     startCluster();
   }
 
+  private HRegionLocation getLoc(RegionLocator locator, int replica)
+    throws IOException, InterruptedException {
+    // we have backup master in this test so we may get stale meta replicas since the cache is
+    // refreshed asynchronously, so add retries here.
+    for (;;) {
+      List<HRegionLocation> locs = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true);
+      if (locs.size() > replica) {
+        // return the requested replica, not a hardcoded index; the guard above
+        // already guarantees the index is in range
+        return locs.get(replica);
+      }
+      Thread.sleep(1000);
+    }
+  }
+
   @Test
   public void testShutdownOfReplicaHolder() throws Exception {
     // checks that the when the server holding meta replica is shut down, the meta replica
     // can be recovered
     try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
       RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
-      HRegionLocation hrl = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true).get(1);
+      HRegionLocation hrl = getLoc(locator, 1);
       ServerName oldServer = hrl.getServerName();
       TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer);
       LOG.debug("Waiting for the replica {} to come up", hrl.getRegion());
       TEST_UTIL.waitFor(30000, () -> {
-        HRegionLocation loc = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true).get(1);
+        HRegionLocation loc = getLoc(locator, 1);
         return loc != null && !loc.getServerName().equals(oldServer);
       });
       LOG.debug("Replica {} is online on {}, old server is {}", hrl.getRegion(),
-        locator.getRegionLocations(HConstants.EMPTY_START_ROW, true).get(1).getServerName(),
-        oldServer);
+        getLoc(locator, 1).getServerName(), oldServer);
     }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
index 83e5431..1d2b346 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
@@ -64,7 +64,6 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
-
 @Category({ MasterTests.class, MediumTests.class })
 public class TestRegionStateStore {
 
@@ -89,12 +88,12 @@ public class TestRegionStateStore {
 
   @Test
   public void testVisitMetaForRegionExistingRegion() throws Exception {
-    final TableName tableName = TableName.valueOf("testVisitMetaForRegion");
+    final TableName tableName = name.getTableName();
     UTIL.createTable(tableName, "cf");
     final List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
     final String encodedName = regions.get(0).getRegionInfo().getEncodedName();
-    final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster().
-      getAssignmentManager().getRegionStateStore();
+    final RegionStateStore regionStateStore =
+      UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
     final AtomicBoolean visitorCalled = new AtomicBoolean(false);
     regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() {
       @Override
@@ -109,18 +108,18 @@ public class TestRegionStateStore {
 
   @Test
   public void testVisitMetaForBadRegionState() throws Exception {
-    final TableName tableName = TableName.valueOf("testVisitMetaForBadRegionState");
+    final TableName tableName = name.getTableName();
     UTIL.createTable(tableName, "cf");
     final List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
     final String encodedName = regions.get(0).getRegionInfo().getEncodedName();
-    final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster().
-        getAssignmentManager().getRegionStateStore();
+    final RegionStateStore regionStateStore =
+      UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
 
     // add the BAD_STATE which does not exist in enum RegionState.State
-    Put put = new Put(regions.get(0).getRegionInfo().getRegionName(),
-        EnvironmentEdgeManager.currentTime());
+    Put put =
+      new Put(regions.get(0).getRegionInfo().getRegionName(), EnvironmentEdgeManager.currentTime());
     put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
-        Bytes.toBytes("BAD_STATE"));
+      Bytes.toBytes("BAD_STATE"));
 
     try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
       table.put(put);
@@ -129,9 +128,8 @@ public class TestRegionStateStore {
     final AtomicBoolean visitorCalled = new AtomicBoolean(false);
     regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() {
       @Override
-      public void visitRegionState(Result result, RegionInfo regionInfo,
-                                   RegionState.State state, ServerName regionLocation,
-                                   ServerName lastHost, long openSeqNum) {
+      public void visitRegionState(Result result, RegionInfo regionInfo, RegionState.State state,
+        ServerName regionLocation, ServerName lastHost, long openSeqNum) {
         assertEquals(encodedName, regionInfo.getEncodedName());
         assertNull(state);
         visitorCalled.set(true);
@@ -143,8 +141,8 @@ public class TestRegionStateStore {
   @Test
   public void testVisitMetaForRegionNonExistingRegion() throws Exception {
     final String encodedName = "fakeencodedregionname";
-    final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster().
-      getAssignmentManager().getRegionStateStore();
+    final RegionStateStore regionStateStore =
+      UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
     final AtomicBoolean visitorCalled = new AtomicBoolean(false);
     regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() {
       @Override

[hbase] 03/09: HBASE-24390 Remove RegionInfoBuilder.FIRST_META_REGIONINFO (#1877)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9959293456dce780884d9c34ab6b5dd8435ee18a
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Wed Jul 1 16:14:21 2020 +0800

    HBASE-24390 Remove RegionInfoBuilder.FIRST_META_REGIONINFO (#1877)
    
    Signed-off-by: stack <st...@apache.org>
---
 .../master/balancer/TestBaseLoadBalancer.java      |   1 -
 .../hadoop/hbase/client/MutableRegionInfo.java     |   8 --
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |   7 -
 .../org/apache/hadoop/hbase/client/RegionInfo.java |   5 +-
 .../hadoop/hbase/client/RegionInfoBuilder.java     |  13 --
 .../hadoop/hbase/client/ZKConnectionRegistry.java  |  11 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |   8 +-
 .../client/TestAsyncRegionLocatorTracing.java      |   2 +-
 .../hadoop/hbase/client/TestRegionInfoBuilder.java |   6 +-
 .../org/apache/hadoop/hbase/ipc/TestIPCUtil.java   |   9 +-
 .../hadoop/hbase/DistributedHBaseCluster.java      |  13 ++
 .../org/apache/hadoop/hbase/master/HMaster.java    |  23 +--
 .../hbase/master/assignment/AssignmentManager.java | 118 ++-------------
 .../assignment/RegionRemoteProcedureBase.java      |   9 +-
 .../hbase/master/assignment/ServerStateNode.java   |   6 +
 .../assignment/TransitRegionStateProcedure.java    |  10 +-
 .../hbase/master/http/MasterStatusServlet.java     |  15 +-
 .../master/procedure/HBCKServerCrashProcedure.java |   2 +-
 .../hbase/master/procedure/InitMetaProcedure.java  |  13 +-
 .../master/procedure/MasterProcedureUtil.java      |  11 ++
 .../master/procedure/ServerCrashProcedure.java     |   8 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |   5 +-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  11 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |  17 +--
 .../main/resources/hbase-webapps/master/table.jsp  |   7 +-
 .../apache/hadoop/hbase/HBaseClusterInterface.java |   5 +-
 .../hadoop/hbase/SingleProcessHBaseCluster.java    |  38 ++++-
 .../hadoop/hbase/TestClientClusterMetrics.java     |  13 +-
 .../org/apache/hadoop/hbase/TestHBaseMetaEdit.java |  10 +-
 .../apache/hadoop/hbase/TestHRegionLocation.java   |  24 ++--
 .../hbase/client/MetaWithReplicasTestBase.java     |  12 +-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |   2 +-
 .../client/TestAsyncTableGetMultiThreaded.java     |   3 +-
 .../client/TestFailedMetaReplicaAssigment.java     |   8 +-
 .../client/TestMetaReplicasAddressChange.java      |   4 +-
 .../client/TestMetaTableAccessorNoCluster.java     |   6 +-
 .../hbase/client/TestSeparateClientZKCluster.java  |   6 +-
 .../TestRegionObserverPreFlushAndPreCompact.java   |   3 +-
 .../hadoop/hbase/master/AbstractTestDLS.java       |  11 +-
 .../hadoop/hbase/master/MockRegionServer.java      |   7 +-
 .../org/apache/hadoop/hbase/master/TestMaster.java |   3 +-
 .../hadoop/hbase/master/TestMasterQosFunction.java |   2 +-
 .../hbase/master/TestMetaShutdownHandler.java      |  38 +++--
 .../master/assignment/AssignmentTestingUtil.java   |  14 +-
 .../master/assignment/TestAssignmentManager.java   |   6 +-
 .../assignment/TestAssignmentManagerBase.java      |  11 +-
 .../master/assignment/TestAssignmentOnRSCrash.java |   4 -
 .../hbase/master/assignment/TestHbckChore.java     |   5 +-
 .../master/janitor/TestMetaFixerNoCluster.java     |  49 ++++---
 .../hbase/regionserver/TestCleanupMetaWAL.java     |   6 +-
 .../hbase/regionserver/TestDefaultMemStore.java    |   6 +-
 .../hbase/regionserver/TestDefaultStoreEngine.java |   9 +-
 .../regionserver/TestGetClosestAtOrBefore.java     |   4 +-
 .../hadoop/hbase/regionserver/TestPriorityRpc.java |  12 +-
 .../hbase/regionserver/TestRSRpcServices.java      |   4 +-
 .../TestReadAndWriteRegionInfoFile.java            |   2 +-
 .../hadoop/hbase/regionserver/TestRegionInfo.java  | 158 +++++++++------------
 .../TestRegionInfoStaticInitialization.java        |   2 +-
 .../hbase/regionserver/TestStripeStoreEngine.java  |   4 +-
 .../compactions/PerfTestCompactionPolicies.java    |   4 +-
 .../compactions/TestStripeCompactionPolicy.java    |   7 +-
 .../regionserver/wal/TestLogRollingNoCluster.java  |   9 +-
 .../TestReplicationWALEntryFilters.java            |   6 +-
 .../org/apache/hadoop/hbase/util/TestFSUtils.java  |  16 +--
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java  |  17 ++-
 .../java/org/apache/hadoop/hbase/HBaseCluster.java |   6 +-
 .../org/apache/hadoop/hbase/MiniHBaseCluster.java  |  36 ++++-
 67 files changed, 443 insertions(+), 487 deletions(-)

diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
index 0c94a20..eff6ecb 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
@@ -128,7 +128,6 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
   public void testBulkAssignment() throws Exception {
     List<ServerName> tmp = getListOfServerNames(randomServers(5, 0));
     List<RegionInfo> hris = randomRegions(20);
-    hris.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
     tmp.add(master);
     Map<ServerName, List<RegionInfo>> plans = loadBalancer.roundRobinAssignment(hris, tmp);
     int totalRegion = 0;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index 0aa301c..e2acd5e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -110,14 +110,6 @@ class MutableRegionInfo implements RegionInfo {
     return regionId;
   }
 
-  /**
-   * Package private constructor used constructing MutableRegionInfo for the first meta regions
-   */
-  MutableRegionInfo(long regionId, TableName tableName, int replicaId) {
-    this(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId,
-      replicaId, false);
-  }
-
   MutableRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
       final boolean split, final long regionId, final int replicaId, boolean offLine) {
     this.tableName = checkTableName(tableName);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index e691bb7..fc3da74 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -2426,13 +2426,6 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       return failedFuture(new IllegalArgumentException("Passed region name can't be null"));
     }
 
-    if (Bytes.equals(regionNameOrEncodedRegionName,
-      RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) ||
-      Bytes.equals(regionNameOrEncodedRegionName,
-        RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
-      return CompletableFuture.completedFuture(RegionInfoBuilder.FIRST_META_REGIONINFO);
-    }
-
     CompletableFuture<RegionInfo> future = new CompletableFuture<>();
     addListener(getRegionLocation(regionNameOrEncodedRegionName), (location, err) -> {
       if (err != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 55a91db..f27f3ee 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -73,8 +73,9 @@ public interface RegionInfo extends Comparable<RegionInfo> {
   @Deprecated
   @InterfaceAudience.Private
   // Not using RegionInfoBuilder intentionally to avoid a static loading deadlock: HBASE-24896
-  RegionInfo UNDEFINED = new MutableRegionInfo(0, TableName.valueOf("__UNDEFINED__"),
-    RegionInfo.DEFAULT_REPLICA_ID);
+  RegionInfo UNDEFINED =
+    new MutableRegionInfo(TableName.valueOf("__UNDEFINED__"), HConstants.EMPTY_START_ROW,
+      HConstants.EMPTY_END_ROW, false, 0, RegionInfo.DEFAULT_REPLICA_ID, false);
 
   /**
    * Separator used to demarcate the encodedName in a region name
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index cc42b96..03434f3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -34,18 +34,6 @@ public class RegionInfoBuilder {
   public static final RegionInfo UNDEFINED =
     RegionInfoBuilder.newBuilder(TableName.valueOf("__UNDEFINED__")).build();
 
-  /**
-   * RegionInfo for first meta region
-   * You cannot use this builder to make an instance of the {@link #FIRST_META_REGIONINFO}.
-   * Just refer to this instance. Also, while the instance is actually a MutableRI, its type is
-   * just RI so the mutable methods are not available (unless you go casting); it appears
-   * as immutable (I tried adding Immutable type but it just makes a mess).
-   */
-  // TODO: How come Meta regions still do not have encoded region names? Fix.
-  // hbase:meta,,1.1588230740 should be the hbase:meta first region name.
-  public static final RegionInfo FIRST_META_REGIONINFO =
-    new MutableRegionInfo(1L, TableName.META_TABLE_NAME, RegionInfo.DEFAULT_REPLICA_ID);
-
   private final TableName tableName;
   private byte[] startKey = HConstants.EMPTY_START_ROW;
   private byte[] endKey = HConstants.EMPTY_END_ROW;
@@ -111,5 +99,4 @@ public class RegionInfoBuilder {
     return new MutableRegionInfo(tableName, startKey, endKey, split,
         regionId, replicaId, offLine);
   }
-
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
index bf93776..c79e9d6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
@@ -18,9 +18,6 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
-import static org.apache.hadoop.hbase.client.RegionInfoBuilder.FIRST_META_REGIONINFO;
-import static org.apache.hadoop.hbase.client.RegionReplicaUtil.getRegionInfoForDefaultReplica;
-import static org.apache.hadoop.hbase.client.RegionReplicaUtil.getRegionInfoForReplica;
 import static org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.lengthOfPBMagic;
 import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
@@ -36,6 +33,7 @@ import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.util.Pair;
@@ -161,7 +159,8 @@ class ZKConnectionRegistry implements ConnectionRegistry {
             LOG.warn("Meta region is in state " + stateAndServerName.getFirst());
           }
           locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
-            getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
+            RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1).build(),
+            stateAndServerName.getSecond());
           tryComplete(remaining, locs, future);
         });
       } else {
@@ -183,8 +182,8 @@ class ZKConnectionRegistry implements ConnectionRegistry {
               locs[replicaId] = null;
             } else {
               locs[replicaId] =
-                new HRegionLocation(getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId),
-                  stateAndServerName.getSecond());
+                new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+                  .setRegionId(1).setReplicaId(replicaId).build(), stateAndServerName.getSecond());
             }
           }
           tryComplete(remaining, locs, future);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index b6918ca..138b469 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLoadStats;
 import org.apache.hadoop.hbase.client.RegionLocateType;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
@@ -3237,8 +3236,8 @@ public final class ProtobufUtil {
     if (serverName == null) {
       state = RegionState.State.OFFLINE;
     }
-    return new RegionState(RegionReplicaUtil.getRegionInfoForReplica(
-        RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName);
+    return new RegionState(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1)
+      .setReplicaId(replicaId).build(), state, serverName);
   }
 
   /**
@@ -3354,9 +3353,6 @@ public final class ProtobufUtil {
     long regionId = proto.getRegionId();
     int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
     int replicaId = proto.hasReplicaId()? proto.getReplicaId(): defaultReplicaId;
-    if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) {
-      return RegionInfoBuilder.FIRST_META_REGIONINFO;
-    }
     byte[] startKey = null;
     byte[] endKey = null;
     if (proto.hasStartKey()) {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
index 180d294..55084c0 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
@@ -130,7 +130,7 @@ public class TestAsyncRegionLocatorTracing {
 
   @Before
   public void setUp() throws IOException {
-    RegionInfo metaRegionInfo = RegionInfoBuilder.FIRST_META_REGIONINFO;
+    RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
     locs = new RegionLocations(
       new HRegionLocation(metaRegionInfo,
         ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())),
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
index 3b66f7e..1c3af9e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
@@ -84,10 +84,10 @@ public class TestRegionInfoBuilder {
 
   @Test
   public void testPb() throws DeserializationException {
-    RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
+    RegionInfo ri = RegionInfoBuilder.newBuilder(name.getTableName()).build();
     byte[] bytes = RegionInfo.toByteArray(ri);
     RegionInfo pbri = RegionInfo.parseFrom(bytes);
-    assertTrue(RegionInfo.COMPARATOR.compare(ri, pbri) == 0);
+    assertEquals(0, RegionInfo.COMPARATOR.compare(ri, pbri));
   }
 
   @Test
@@ -183,7 +183,7 @@ public class TestRegionInfoBuilder {
 
   @Test
   public void testMetaTables() {
-    assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isMetaRegion());
+    assertTrue(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().isMetaRegion());
   }
 
   @Test
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java
index 45da1e8..46107d1 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java
@@ -30,6 +30,8 @@ import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeoutException;
 import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
@@ -106,13 +108,12 @@ public class TestIPCUtil {
       if (exception instanceof TimeoutException) {
         assertThat(IPCUtil.wrapException(addr, null, exception), instanceOf(TimeoutIOException.class));
       } else {
-        IOException ioe = IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO,
-          exception);
+        RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+        IOException ioe = IPCUtil.wrapException(addr, ri, exception);
         // Assert that the exception contains the Region name if supplied. HBASE-25735.
         // Not all exceptions get the region stuffed into it.
         if (ioe.getMessage() != null) {
-          assertTrue(ioe.getMessage().
-            contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString()));
+          assertTrue(ioe.getMessage().contains(ri.getRegionNameAsString()));
         }
         assertThat(ioe, instanceOf(exception.getClass()));
       }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 9fb7db9..885729c 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -307,6 +307,19 @@ public class DistributedHBaseCluster extends HBaseClusterInterface {
   }
 
   @Override
+  public ServerName getServerHoldingMeta() throws IOException {
+    HRegionLocation regionLoc = null;
+    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
+      regionLoc = locator.getRegionLocation(HConstants.EMPTY_START_ROW, true);
+    }
+    if (regionLoc == null) {
+      LOG.warn("Cannot find region server holding first meta region");
+      return null;
+    }
+    return regionLoc.getServerName();
+  }
+
+  @Override
   public ServerName getServerHoldingRegion(TableName tn, byte[] regionName) throws IOException {
     byte[] startKey = RegionInfo.getStartKey(regionName);
     HRegionLocation regionLoc = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 171966f..4e84960 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -89,8 +89,8 @@ import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocateType;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -816,13 +816,16 @@ public class HMaster extends HRegionServer implements MasterServices {
       }
     }
     // start migrating
-    byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
-    Put put = new Put(row);
+    Put put = null;
     List<String> metaReplicaNodes = zooKeeper.getMetaReplicaNodes();
     StringBuilder info = new StringBuilder("Migrating meta location:");
     for (String metaReplicaNode : metaReplicaNodes) {
       int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaNode);
       RegionState state = getMetaRegionState(zooKeeper, replicaId);
+      if (put == null) {
+        byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(state.getRegion());
+        put = new Put(row);
+      }
       info.append(" ").append(state);
       put.setTimestamp(state.getStamp());
       MetaTableAccessor.addRegionInfo(put, state.getRegion());
@@ -834,9 +837,10 @@ public class HMaster extends HRegionServer implements MasterServices {
         .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp())
         .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build());
     }
-    if (!put.isEmpty()) {
+    if (put != null) {
       LOG.info(info.toString());
-      masterRegion.update(r -> r.put(put));
+      final Put p = put;
+      masterRegion.update(r -> r.put(p));
     } else {
       LOG.info("No meta location avaiable on zookeeper, skip migrating...");
     }
@@ -1276,11 +1280,14 @@ public class HMaster extends HRegionServer implements MasterServices {
   /**
    * Check hbase:meta is up and ready for reading. For use during Master startup only.
    * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online
-   *   and we will hold here until operator intervention.
+   *         and we will hold here until operator intervention.
    */
   @InterfaceAudience.Private
-  public boolean waitForMetaOnline() {
-    return isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO);
+  public boolean waitForMetaOnline() throws InterruptedException {
+    Optional<RegionInfo> firstMetaRegion =
+      this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).stream()
+        .filter(RegionInfo::isFirst).filter(RegionReplicaUtil::isDefaultReplica).findFirst();
+    return firstMetaRegion.isPresent() ? isRegionOnline(firstMetaRegion.get()) : false;
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 73b6aa6..1786afe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -45,8 +45,6 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.DoNotRetryRegionException;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -153,7 +151,6 @@ public class AssignmentManager {
   private static final int DEFAULT_RIT_STUCK_WARNING_THRESHOLD = 60 * 1000;
   public static final String UNEXPECTED_STATE_REGION = "Unexpected state for ";
 
-  private final ProcedureEvent<?> metaAssignEvent = new ProcedureEvent<>("meta assign");
   private final ProcedureEvent<?> metaLoadEvent = new ProcedureEvent<>("meta load");
 
   private final MetricsAssignmentManager metrics;
@@ -279,10 +276,6 @@ public class AssignmentManager {
             if (regionLocation != null) {
               regionStates.addRegionToServer(regionNode);
             }
-            if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
-              setMetaAssigned(regionInfo, state == State.OPEN);
-            }
-
             if (regionInfo.isFirst()) {
               // for compatibility, mirror the meta region state to zookeeper
               try {
@@ -360,9 +353,6 @@ public class AssignmentManager {
     // Update meta events (for testing)
     if (hasProcExecutor) {
       metaLoadEvent.suspend();
-      for (RegionInfo hri: getMetaRegionSet()) {
-        setMetaAssigned(hri, false);
-      }
     }
   }
 
@@ -424,6 +414,14 @@ public class AssignmentManager {
     return serverInfo.getRegionInfoList();
   }
 
+  public List<RegionInfo> getDefaultMetaRegionsOnServer(ServerName serverName) {
+    ServerStateNode serverInfo = regionStates.getServerNode(serverName);
+    if (serverInfo == null) {
+      return Collections.emptyList();
+    }
+    return serverInfo.getDefaultMetaRegionInfoList();
+  }
+
   public RegionStateStore getRegionStateStore() {
     return regionStateStore;
   }
@@ -453,95 +451,22 @@ public class AssignmentManager {
   // ============================================================================================
   //  META Helpers
   // ============================================================================================
-  private boolean isMetaRegion(final RegionInfo regionInfo) {
-    return regionInfo.isMetaRegion();
-  }
-
-  public boolean isMetaRegion(final byte[] regionName) {
-    return getMetaRegionFromName(regionName) != null;
-  }
-
-  public RegionInfo getMetaRegionFromName(final byte[] regionName) {
-    for (RegionInfo hri: getMetaRegionSet()) {
-      if (Bytes.equals(hri.getRegionName(), regionName)) {
-        return hri;
-      }
-    }
-    return null;
-  }
-
-  public boolean isCarryingMeta(final ServerName serverName) {
-    // TODO: handle multiple meta
-    return isCarryingRegion(serverName, RegionInfoBuilder.FIRST_META_REGIONINFO);
-  }
-
-  private boolean isCarryingRegion(final ServerName serverName, final RegionInfo regionInfo) {
-    // TODO: check for state?
-    final RegionStateNode node = regionStates.getRegionStateNode(regionInfo);
-    return(node != null && serverName.equals(node.getRegionLocation()));
-  }
-
-  private RegionInfo getMetaForRegion(final RegionInfo regionInfo) {
-    //if (regionInfo.isMetaRegion()) return regionInfo;
-    // TODO: handle multiple meta. if the region provided is not meta lookup
-    // which meta the region belongs to.
-    return RegionInfoBuilder.FIRST_META_REGIONINFO;
-  }
-
-  // TODO: handle multiple meta.
-  private static final Set<RegionInfo> META_REGION_SET =
-      Collections.singleton(RegionInfoBuilder.FIRST_META_REGIONINFO);
-  public Set<RegionInfo> getMetaRegionSet() {
-    return META_REGION_SET;
+  public boolean isCarryingMeta(ServerName serverName) {
+    return regionStates.getTableRegionStateNodes(TableName.META_TABLE_NAME).stream()
+      .map(RegionStateNode::getRegionLocation).anyMatch(serverName::equals);
   }
 
   // ============================================================================================
   //  META Event(s) helpers
   // ============================================================================================
-  /**
-   * Notice that, this only means the meta region is available on a RS, but the AM may still be
-   * loading the region states from meta, so usually you need to check {@link #isMetaLoaded()} first
-   * before checking this method, unless you can make sure that your piece of code can only be
-   * executed after AM builds the region states.
-   * @see #isMetaLoaded()
-   */
-  public boolean isMetaAssigned() {
-    return metaAssignEvent.isReady();
-  }
-
   public boolean isMetaRegionInTransition() {
-    return !isMetaAssigned();
-  }
-
-  /**
-   * Notice that this event does not mean the AM has already finished region state rebuilding. See
-   * the comment of {@link #isMetaAssigned()} for more details.
-   * @see #isMetaAssigned()
-   */
-  public boolean waitMetaAssigned(Procedure<?> proc, RegionInfo regionInfo) {
-    return getMetaAssignEvent(getMetaForRegion(regionInfo)).suspendIfNotReady(proc);
-  }
-
-  private void setMetaAssigned(RegionInfo metaRegionInfo, boolean assigned) {
-    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-    ProcedureEvent<?> metaAssignEvent = getMetaAssignEvent(metaRegionInfo);
-    if (assigned) {
-      metaAssignEvent.wake(getProcedureScheduler());
-    } else {
-      metaAssignEvent.suspend();
-    }
-  }
-
-  private ProcedureEvent<?> getMetaAssignEvent(RegionInfo metaRegionInfo) {
-    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-    // TODO: handle multiple meta.
-    return metaAssignEvent;
+    return regionStates.getRegionsInTransition().stream().map(RegionStateNode::getRegionInfo)
+      .anyMatch(RegionInfo::isMetaRegion);
   }
 
   /**
    * Wait until AM finishes the meta loading, i.e, the region states rebuilding.
    * @see #isMetaLoaded()
-   * @see #waitMetaAssigned(Procedure, RegionInfo)
    */
   public boolean waitMetaLoaded(Procedure<?> proc) {
     return metaLoadEvent.suspendIfNotReady(proc);
@@ -562,7 +487,6 @@ public class AssignmentManager {
 
   /**
    * Return whether AM finishes the meta loading, i.e, the region states rebuilding.
-   * @see #isMetaAssigned()
    * @see #waitMetaLoaded(Procedure)
    */
   public boolean isMetaLoaded() {
@@ -1695,7 +1619,7 @@ public class AssignmentManager {
     if (!isRunning()) {
       throw new PleaseHoldException("AssignmentManager not running");
     }
-    boolean meta = isMetaRegion(hri);
+    boolean meta = hri.isMetaRegion();
     boolean metaLoaded = isMetaLoaded();
     if (!meta && !metaLoaded) {
       throw new PleaseHoldException(
@@ -1927,12 +1851,6 @@ public class AssignmentManager {
   // should be called under the RegionStateNode lock
   void regionClosing(RegionStateNode regionNode) throws IOException {
     transitStateAndUpdate(regionNode, State.CLOSING, STATES_EXPECTED_ON_CLOSING);
-
-    RegionInfo hri = regionNode.getRegionInfo();
-    // Set meta has not initialized early. so people trying to create/edit tables will wait
-    if (isMetaRegion(hri)) {
-      setMetaAssigned(hri, false);
-    }
     regionStates.addRegionToServer(regionNode);
     // update the operation count metrics
     metrics.incrementOperationCounter();
@@ -1988,14 +1906,6 @@ public class AssignmentManager {
 
   void persistToMeta(RegionStateNode regionNode) throws IOException {
     regionStateStore.updateRegionLocation(regionNode);
-    RegionInfo regionInfo = regionNode.getRegionInfo();
-    if (isMetaRegion(regionInfo) && regionNode.getState() == State.OPEN) {
-      // Usually we'd set a table ENABLED at this stage but hbase:meta is ALWAYs enabled, it
-      // can't be disabled -- so skip the RPC (besides... enabled is managed by TableStateManager
-      // which is backed by hbase:meta... Avoid setting ENABLED to avoid having to update state
-      // on table that contains state.
-      setMetaAssigned(regionInfo, true);
-    }
   }
 
   // ============================================================================================
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java
index 805b51c..8eebc4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
 import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -148,13 +149,7 @@ public abstract class RegionRemoteProcedureBase extends Procedure<MasterProcedur
 
   @Override
   protected boolean waitInitialized(MasterProcedureEnv env) {
-    if (TableName.isMetaTableName(getTableName())) {
-      return false;
-    }
-    // First we need meta to be loaded, and second, if meta is not online then we will likely to
-    // fail when updating meta so we wait until it is assigned.
-    AssignmentManager am = env.getAssignmentManager();
-    return am.waitMetaLoaded(this) || am.waitMetaAssigned(this, region);
+    return MasterProcedureUtil.waitInitialized(this, env, getTableName());
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java
index 33f6b1a..30feab4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java
@@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -73,6 +74,11 @@ public class ServerStateNode implements Comparable<ServerStateNode> {
     return regions.stream().map(RegionStateNode::getRegionInfo).collect(Collectors.toList());
   }
 
+  public List<RegionInfo> getDefaultMetaRegionInfoList() {
+    return regions.stream().map(RegionStateNode::getRegionInfo).filter(RegionInfo::isMetaRegion)
+      .filter(RegionReplicaUtil::isDefaultReplica).collect(Collectors.toList());
+  }
+
   public List<RegionInfo> getSystemRegionInfoList() {
     return regions.stream().filter(RegionStateNode::isSystemTable)
       .map(RegionStateNode::getRegionInfo).collect(Collectors.toList());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
index 8ca1ee4..69b48aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
@@ -21,7 +21,6 @@ import edu.umd.cs.findbugs.annotations.Nullable;
 import java.io.IOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
@@ -29,6 +28,7 @@ import org.apache.hadoop.hbase.master.MetricsAssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
@@ -163,13 +163,7 @@ public class TransitRegionStateProcedure
 
   @Override
   protected boolean waitInitialized(MasterProcedureEnv env) {
-    if (TableName.isMetaTableName(getTableName())) {
-      return false;
-    }
-    // First we need meta to be loaded, and second, if meta is not online then we will likely to
-    // fail when updating meta so we wait until it is assigned.
-    AssignmentManager am = env.getAssignmentManager();
-    return am.waitMetaLoaded(this) || am.waitMetaAssigned(this, getRegion());
+    return MasterProcedureUtil.waitInitialized(this, env, getTableName());
   }
 
   private void queueAssign(MasterProcedureEnv env, RegionStateNode regionNode)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java
index 3d00e49..caf6858 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java
@@ -26,8 +26,11 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocateType;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
@@ -81,9 +84,13 @@ public class MasterStatusServlet extends HttpServlet {
     tmpl.render(response.getWriter(), master);
   }
 
-  private ServerName getMetaLocationOrNull(HMaster master) {
-    return master.getAssignmentManager().getRegionStates()
-      .getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO).getServerName();
+  private ServerName getMetaLocationOrNull(HMaster master) throws IOException {
+    RegionLocations locs = master.locateMeta(HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT);
+    if (locs == null) {
+      return null;
+    }
+    HRegionLocation loc = locs.getDefaultRegionLocation();
+    return loc != null ? loc.getServerName() : null;
   }
 
   private Map<String, Integer> getFragmentationInfo(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
index 725a138..28e1901 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
@@ -84,7 +84,7 @@ public class HBCKServerCrashProcedure extends ServerCrashProcedure {
   @Override
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH_EXCEPTION",
     justification="FindBugs seems confused on ps in below.")
-  List<RegionInfo> getRegionsOnCrashedServer(MasterProcedureEnv env) {
+  protected List<RegionInfo> getRegionsOnCrashedServer(MasterProcedureEnv env) {
     // Super will return an immutable list (empty if nothing on this server).
     List<RegionInfo> ris = super.getRegionsOnCrashedServer(env);
     if (!ris.isEmpty()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index e92fc11..05c5b85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
@@ -57,6 +58,14 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMe
 
   private static final Logger LOG = LoggerFactory.getLogger(InitMetaProcedure.class);
 
+  /**
+   * Used to create meta table when bootstrapping a new hbase cluster.
+   * <p/>
+   * Setting region id to 1 is for keeping compatible with old clients.
+   */
+  private static final RegionInfo BOOTSTRAP_META_REGIONINFO =
+    RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1).build();
+
   private CountDownLatch latch = new CountDownLatch(1);
 
   private RetryCounter retryCounter;
@@ -85,7 +94,7 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMe
     TableDescriptor metaDescriptor =
       FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir);
     HRegion
-      .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null)
+      .createHRegion(BOOTSTRAP_META_REGIONINFO, rootDir, conf, metaDescriptor, null)
       .close();
     return metaDescriptor;
   }
@@ -106,7 +115,7 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMe
         case INIT_META_ASSIGN_META:
           LOG.info("Going to assign meta");
           addChildProcedure(env.getAssignmentManager()
-            .createAssignProcedures(Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO)));
+            .createAssignProcedures(Arrays.asList(BOOTSTRAP_META_REGIONINFO)));
           setNextState(InitMetaState.INIT_META_CREATE_NAMESPACES);
           return Flow.HAS_MORE_STATE;
         case INIT_META_CREATE_NAMESPACES:
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
index c6e77fd..0766b5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureException;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@@ -241,4 +242,14 @@ public final class MasterProcedureUtil {
     return Optional
       .ofNullable(namespaceDesc.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP));
   }
+
+  public static boolean waitInitialized(Procedure<MasterProcedureEnv> proc, MasterProcedureEnv env,
+    TableName tableName) {
+    if (TableName.isMetaTableName(tableName)) {
+      return false;
+    }
+    // we need meta to be loaded
+    AssignmentManager am = env.getAssignmentManager();
+    return am.waitMetaLoaded(proc);
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 49449e3..3722391 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -20,12 +20,10 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED
 import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -184,7 +182,9 @@ public class ServerCrashProcedure
           }
           break;
         case SERVER_CRASH_ASSIGN_META:
-          assignRegions(env, Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO));
+          // notice that, here we will only assign the primary meta regions, secondary meta replicas
+          // will be assigned below
+          assignRegions(env, env.getAssignmentManager().getDefaultMetaRegionsOnServer(serverName));
           setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
           break;
         case SERVER_CRASH_GET_REGIONS:
@@ -263,7 +263,7 @@ public class ServerCrashProcedure
   /**
    * @return List of Regions on crashed server.
    */
-  List<RegionInfo> getRegionsOnCrashedServer(MasterProcedureEnv env) {
+  protected List<RegionInfo> getRegionsOnCrashedServer(MasterProcedureEnv env) {
     return env.getMasterServices().getAssignmentManager().getRegionsOnServer(serverName);
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index ae602a6..bd0b423 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -92,7 +92,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.client.locking.LockServiceClient;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
@@ -1232,7 +1232,8 @@ public class HRegionServer extends Thread implements
   }
 
   private boolean containsMetaTableRegions() {
-    return onlineRegions.containsKey(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
+    return onlineRegions.values().stream().map(Region::getRegionInfo)
+      .anyMatch(ri -> ri.isMetaRegion() && RegionReplicaUtil.isDefaultReplica(ri));
   }
 
   private boolean areAllUserRegionsOffline() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index b32d497..d876117 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.HFileLink;
@@ -392,7 +391,7 @@ public final class FSUtils {
     String version = getVersion(fs, rootdir);
     String msg;
     if (version == null) {
-      if (!metaRegionExists(fs, rootdir)) {
+      if (!metaTableExists(fs, rootdir)) {
         // rootDir is empty (no version file and no root region)
         // just create new version file (HBASE-1195)
         setVersion(fs, rootdir, wait, retries);
@@ -693,14 +692,14 @@ public final class FSUtils {
   }
 
   /**
-   * Checks if meta region exists
+   * Checks if meta table exists
    * @param fs file system
    * @param rootDir root directory of HBase installation
    * @return true if exists
    */
-  public static boolean metaRegionExists(FileSystem fs, Path rootDir) throws IOException {
-    Path metaRegionDir = getRegionDirFromRootDir(rootDir, RegionInfoBuilder.FIRST_META_REGIONINFO);
-    return fs.exists(metaRegionDir);
+  private static boolean metaTableExists(FileSystem fs, Path rootDir) throws IOException {
+    Path metaTableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
+    return fs.exists(metaTableDir);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 28f0d5e..343ddcc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -2717,19 +2717,10 @@ public class HBaseFsck extends Configured implements Closeable {
       zkw.getZNodePaths().getZNodeForReplica(hi.getMetaEntry().getRegionInfo().getReplicaId()));
   }
 
-  private void assignMetaReplica(int replicaId)
-      throws IOException, KeeperException, InterruptedException {
-    errors.reportError(ERROR_CODE.NO_META_REGION, "hbase:meta, replicaId " +
-        replicaId +" is not found on any region.");
-    if (shouldFixAssignments()) {
-      errors.print("Trying to fix a problem with hbase:meta..");
-      setShouldRerun();
-      // try to fix it (treat it as unassigned region)
-      RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
-          RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId);
-      HBaseFsckRepair.fixUnassigned(admin, h);
-      HBaseFsckRepair.waitUntilAssigned(admin, h);
-    }
+  private void assignMetaReplica(int replicaId) {
+    errors.reportError(ERROR_CODE.NO_META_REGION,
+      "hbase:meta, replicaId " + replicaId + " is not found on any region.");
+    throw new UnsupportedOperationException("fix meta region is not allowed");
   }
 
   /**
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 29913b5..df631c0 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -312,7 +312,7 @@
           // NOTE: Presumes meta with one or more replicas
           for (int j = 0; j < numMetaReplicas; j++) {
             RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
-                                    RegionInfoBuilder.FIRST_META_REGIONINFO, j);
+              RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(), j);
             RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta);
             // If a metaLocation is null, All of its info would be empty here to be displayed.
             ServerName metaLocation = regionState != null ? regionState.getServerName() : null;
@@ -380,7 +380,7 @@
            // NOTE: Presumes meta with one or more replicas
            for (int j = 0; j < numMetaReplicas; j++) {
              RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
-                                     RegionInfoBuilder.FIRST_META_REGIONINFO, j);
+               RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(), j);
              RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta);
              // If a metaLocation is null, All of its info would be empty here to be displayed.
              ServerName metaLocation = regionState != null ? regionState.getServerName() : null;
@@ -431,9 +431,8 @@
           // NOTE: Presumes meta with one or more replicas
           for (int j = 0; j < numMetaReplicas; j++) {
             RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
-                                    RegionInfoBuilder.FIRST_META_REGIONINFO, j);
+              RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(), j);
             RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta);
-            // If a metaLocation is null, All of its info would be empty here to be displayed.
             ServerName metaLocation = regionState != null ? regionState.getServerName() : null;
             for (int i = 0; i < 1; i++) {
               //If metaLocation is null, default value below would be displayed in UI.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java
index d43e62b..7f3e6f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java
@@ -353,10 +353,7 @@ public abstract class HBaseClusterInterface implements Closeable, Configurable {
   /**
    * Get the ServerName of region server serving the first hbase:meta region
    */
-  public ServerName getServerHoldingMeta() throws IOException {
-    return getServerHoldingRegion(TableName.META_TABLE_NAME,
-      RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
-  }
+  public abstract ServerName getServerHoldingMeta() throws IOException;
 
   /**
    * Get the ServerName of region server serving the specified region
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java
index dc899e0..f225056 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java
@@ -26,7 +26,8 @@ import java.util.List;
 import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
@@ -799,11 +800,24 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
 
   /**
-   * Returns index into List of {@link SingleProcessHBaseCluster#getRegionServerThreads()} of HRS
-   * carrying regionName. Returns -1 if none found.
+   * @return Index into List of {@link SingleProcessHBaseCluster#getRegionServerThreads()}
+   * of HRS carrying regionName. Returns -1 if none found.
    */
   public int getServerWithMeta() {
-    return getServerWith(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
+    int index = 0;
+    for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
+      HRegionServer hrs = rst.getRegionServer();
+      if (!hrs.isStopped()) {
+        for (Region region : hrs.getRegions(TableName.META_TABLE_NAME)) {
+          RegionInfo ri = region.getRegionInfo();
+          if (ri.isFirst() && RegionReplicaUtil.isDefaultReplica(ri)) {
+            return index;
+          }
+        }
+      }
+      index++;
+    }
+    return -1;
   }
 
   /**
@@ -828,6 +842,22 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface {
   }
 
   @Override
+  public ServerName getServerHoldingMeta() throws IOException {
+    for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
+      HRegionServer hrs = rst.getRegionServer();
+      if (!hrs.isStopped()) {
+        for (Region region : hrs.getRegions(TableName.META_TABLE_NAME)) {
+          RegionInfo ri = region.getRegionInfo();
+          if (ri.isFirst() && RegionReplicaUtil.isDefaultReplica(ri)) {
+            return hrs.getServerName();
+          }
+        }
+      }
+    }
+    return null;
+  }
+
+  @Override
   public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName)
     throws IOException {
     // Assume there is only one master thread which is the active master.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
index 6aef56b..ee11643 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -25,7 +25,6 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.Waiter.Predicate;
@@ -37,7 +36,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -395,11 +393,12 @@ public class TestClientClusterMetrics {
 
   private RegionMetrics getMetaMetrics() throws IOException {
     for (ServerMetrics serverMetrics : ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-        .getLiveServerMetrics().values()) {
-      RegionMetrics metaMetrics = serverMetrics.getRegionMetrics()
-          .get(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
-      if (metaMetrics != null) {
-        return metaMetrics;
+      .getLiveServerMetrics().values()) {
+      for (RegionMetrics metrics : serverMetrics.getRegionMetrics().values()) {
+        if (CatalogFamilyFormat.parseRegionInfoFromRegionName(metrics.getRegionName())
+          .isMetaRegion()) {
+          return metrics;
+        }
       }
     }
     Assert.fail("Should have find meta metrics");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java
index fb3ea93..448757e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java
@@ -27,7 +27,6 @@ import java.util.Collections;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -116,11 +115,12 @@ public class TestHBaseMetaEdit {
     String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration().
         get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
     assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
-    Region r = UTIL.getHBaseCluster().getRegionServer(0).
-        getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
+    Region r =
+      UTIL.getHBaseCluster().getRegionServer(0).getRegions(TableName.META_TABLE_NAME).get(0);
     assertEquals(oldVersions + 1,
-        r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
-    encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().
+      r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
+    encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor()
+      .
         getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
     assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
     assertTrue(r.getStore(extraColumnFamilyName) != null);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java
index 038ced6..9f1e54d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHRegionLocation.java
@@ -22,16 +22,21 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Category({ MiscTests.class, SmallTests.class })
 public class TestHRegionLocation {
 
+  private static final Logger LOG = LoggerFactory.getLogger(TestHRegionLocation.class);
+
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestHRegionLocation.class);
@@ -43,17 +48,18 @@ public class TestHRegionLocation {
   @Test
   public void testHashAndEqualsCode() {
     ServerName hsa1 = ServerName.valueOf("localhost", 1234, -1L);
-    HRegionLocation hrl1 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1);
-    HRegionLocation hrl2 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1);
+    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+    HRegionLocation hrl1 = new HRegionLocation(ri, hsa1);
+    HRegionLocation hrl2 = new HRegionLocation(ri, hsa1);
     assertEquals(hrl1.hashCode(), hrl2.hashCode());
     assertTrue(hrl1.equals(hrl2));
-    HRegionLocation hrl3 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1);
+    HRegionLocation hrl3 = new HRegionLocation(ri, hsa1);
     assertNotSame(hrl1, hrl3);
     // They are equal because they have same location even though they are
     // carrying different regions or timestamp.
     assertTrue(hrl1.equals(hrl3));
     ServerName hsa2 = ServerName.valueOf("localhost", 12345, -1L);
-    HRegionLocation hrl4 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa2);
+    HRegionLocation hrl4 = new HRegionLocation(ri, hsa2);
     // These have same HRI but different locations so should be different.
     assertFalse(hrl3.equals(hrl4));
     HRegionLocation hrl5 =
@@ -64,17 +70,19 @@ public class TestHRegionLocation {
   @Test
   public void testToString() {
     ServerName hsa1 = ServerName.valueOf("localhost", 1234, -1L);
-    HRegionLocation hrl1 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1);
-    System.out.println(hrl1.toString());
+    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+    HRegionLocation hrl1 = new HRegionLocation(ri, hsa1);
+    LOG.info(hrl1.toString());
   }
 
   @SuppressWarnings("SelfComparison")
   @Test
   public void testCompareTo() {
+    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
     ServerName hsa1 = ServerName.valueOf("localhost", 1234, -1L);
-    HRegionLocation hsl1 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa1);
+    HRegionLocation hsl1 = new HRegionLocation(ri, hsa1);
     ServerName hsa2 = ServerName.valueOf("localhost", 1235, -1L);
-    HRegionLocation hsl2 = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, hsa2);
+    HRegionLocation hsl2 = new HRegionLocation(ri, hsa2);
     assertEquals(0, hsl1.compareTo(hsl1));
     assertEquals(0, hsl2.compareTo(hsl2));
     int compare1 = hsl1.compareTo(hsl2);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
index 3b0fbe8..899e417 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
@@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
 import org.apache.hadoop.hbase.TableName;
@@ -65,16 +66,18 @@ public class MetaWithReplicasTestBase {
     HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3);
     AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
     Set<ServerName> sns = new HashSet<ServerName>();
+    RegionInfo metaRegionInfo;
     ServerName hbaseMetaServerName;
     try (RegionLocator locator =
       TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
-      hbaseMetaServerName = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getServerName();
+      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
+      metaRegionInfo = loc.getRegion();
+      hbaseMetaServerName = loc.getServerName();
     }
     LOG.info("HBASE:META DEPLOY: on " + hbaseMetaServerName);
     sns.add(hbaseMetaServerName);
     for (int replicaId = 1; replicaId < 3; replicaId++) {
-      RegionInfo h = RegionReplicaUtil
-        .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId);
+      RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, replicaId);
       AssignmentTestingUtil.waitForAssignment(am, h);
       ServerName sn = am.getRegionStates().getRegionServerOfRegion(h);
       assertNotNull(sn);
@@ -98,8 +101,7 @@ public class MetaWithReplicasTestBase {
       ServerName metaServerName =
         TEST_UTIL.getHBaseCluster().getRegionServer(metaServerIndex).getServerName();
       assertNotEquals(destinationServerName, metaServerName);
-      TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
-        destinationServerName);
+      TEST_UTIL.getAdmin().move(metaRegionInfo.getEncodedNameAsBytes(), destinationServerName);
     }
     // Disable the balancer
     LoadBalancerTracker l =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index f967709..fca0394 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -704,7 +704,7 @@ public class TestAdmin2 extends TestAdminBase {
     testGetWithRegionName(sn, ri, ri.getEncodedNameAsBytes());
     testGetWithRegionName(sn, ri, ri.getRegionName());
     // Try querying meta encoded name.
-    ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
+    ri = ADMIN.getRegions(TableName.META_TABLE_NAME).get(0);
     testGetWithRegionName(sn, ri, ri.getEncodedNameAsBytes());
     testGetWithRegionName(sn, ri, ri.getRegionName());
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index ca11ea6..377f30f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -204,7 +204,8 @@ public class TestAsyncTableGetMultiThreaded {
           .map(t -> t.getRegionServer().getServerName()).filter(s -> !s.equals(metaServer))
           .findAny().get();
       LOG.info("====== Moving meta from {} to {} ======", metaServer, newMetaServer);
-      admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), newMetaServer);
+      RegionInfo meta = admin.getRegions(TableName.META_TABLE_NAME).get(0);
+      admin.move(meta.getEncodedNameAsBytes(), newMetaServer);
       LOG.info("====== Move meta done ======");
       Thread.sleep(5000);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
index fde362c..4409d29 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -81,9 +82,9 @@ public class TestFailedMetaReplicaAssigment {
     TEST_UTIL.waitFor(30000, () -> master.isInitialized());
 
     AssignmentManager am = master.getAssignmentManager();
+    RegionInfo metaHri = am.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).get(0);
     // showing one of the replicas got assigned
-    RegionInfo metaReplicaHri =
-      RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, 1);
+    RegionInfo metaReplicaHri = RegionReplicaUtil.getRegionInfoForReplica(metaHri, 1);
     // we use assignAsync so we need to wait a bit
     TEST_UTIL.waitFor(30000, () -> {
       RegionStateNode metaReplicaRegionNode =
@@ -91,8 +92,7 @@ public class TestFailedMetaReplicaAssigment {
       return metaReplicaRegionNode.getRegionLocation() != null;
     });
     // showing one of the replicas failed to be assigned
-    RegionInfo metaReplicaHri2 =
-      RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, 2);
+    RegionInfo metaReplicaHri2 = RegionReplicaUtil.getRegionInfoForReplica(metaHri, 2);
     RegionStateNode metaReplicaRegionNode2 =
       am.getRegionStates().getOrCreateRegionStateNode(metaReplicaHri2);
     // wait for several seconds to make sure that it is not assigned
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java
index fe10584..f2bf5c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java
@@ -77,8 +77,8 @@ public class TestMetaReplicasAddressChange extends MetaWithReplicasTestBase {
     final TableName tableName = name.getTableName();
     TEST_UTIL.createTable(tableName, "f");
     assertTrue(TEST_UTIL.getAdmin().tableExists(tableName));
-    TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
-      moveToServer);
+    RegionInfo metaRegionInfo = TEST_UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).get(0);
+    TEST_UTIL.getAdmin().move(metaRegionInfo.getEncodedNameAsBytes(), moveToServer);
     assertNotEquals(currentServer, moveToServer);
     LOG.debug("CurrentServer={}, moveToServer={}", currentServer, moveToServer);
     TEST_UTIL.waitFor(60000, () -> {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
index e859e72..283a99c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.junit.After;
@@ -80,10 +81,11 @@ public class TestMetaTableAccessorNoCluster {
     assertTrue(hri == null);
     // OK, give it what it expects
     kvs.clear();
+    RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
     kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER,
-      RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
+      RegionInfo.toByteArray(metaRegionInfo)));
     hri = CatalogFamilyFormat.getRegionInfo(Result.create(kvs));
     assertNotNull(hri);
-    assertTrue(RegionInfo.COMPARATOR.compare(hri, RegionInfoBuilder.FIRST_META_REGIONINFO) == 0);
+    assertTrue(RegionInfo.COMPARATOR.compare(hri, metaRegionInfo) == 0);
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
index 24a9c51..372dade 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
@@ -177,7 +177,8 @@ public class TestSeparateClientZKCluster {
           break;
         }
       }
-      admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), destServerName);
+      RegionInfo metaRegion = admin.getRegions(TableName.META_TABLE_NAME).get(0);
+      admin.move(metaRegion.getEncodedNameAsBytes(), destServerName);
       LOG.debug("Finished moving meta");
       // invalidate client cache
       RegionInfo region = locator.getRegionLocation(row).getRegion();
@@ -215,6 +216,7 @@ public class TestSeparateClientZKCluster {
       Put put = new Put(row);
       put.addColumn(family, qualifier, value);
       table.put(put);
+      RegionInfo metaRegion = admin.getRegions(TableName.META_TABLE_NAME).get(0);
       // invalid connection cache
       conn.clearRegionLocationCache();
       // stop client zk cluster
@@ -230,7 +232,7 @@ public class TestSeparateClientZKCluster {
       }
       // wait for meta region online
       AssignmentTestingUtil.waitForAssignment(cluster.getMaster().getAssignmentManager(),
-        RegionInfoBuilder.FIRST_META_REGIONINFO);
+        metaRegion);
       // wait some long time to make sure we will retry sync data to client ZK until data set
       Thread.sleep(10000);
       clientZkCluster.startup(clientZkDir);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java
index 734d4e0..ef8bc1b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java
@@ -24,6 +24,7 @@ import java.util.Optional;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -109,7 +110,7 @@ public class TestRegionObserverPreFlushAndPreCompact {
     // Make up an HRegion instance. Use the hbase:meta first region as our RegionInfo. Use
     // hbase:meta table name for building the TableDescriptor our mock returns when asked schema
     // down inside RegionCoprocessorHost. Pass in mocked RegionServerServices too.
-    RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
+    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
     HRegion mockedHRegion = Mockito.mock(HRegion.class);
     Mockito.when(mockedHRegion.getRegionInfo()).thenReturn(ri);
     TableDescriptor td = TableDescriptorBuilder.newBuilder(ri.getTable()).build();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
index 230b5cd..4d77419 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.SplitLogCounters.tot_mgr_wait_for_zk_delet
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -49,7 +50,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
@@ -78,6 +78,7 @@ import org.junit.Test;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 /**
@@ -206,7 +207,7 @@ public abstract class AbstractTestDLS {
       LOG.info("Current Open Regions After Master Node Starts Up:" +
           HBaseTestingUtil.getAllOnlineRegions(cluster).size());
 
-      assertEquals(numLogLines, TEST_UTIL.countRows(ht));
+      assertEquals(numLogLines, HBaseTestingUtil.countRows(ht));
     }
   }
 
@@ -242,7 +243,7 @@ public abstract class AbstractTestDLS {
       TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
       int rows;
       try {
-        rows = TEST_UTIL.countRows(table);
+        rows = HBaseTestingUtil.countRows(table);
       } catch (Exception e) {
         Threads.printThreadInfo(System.out, "Thread dump before fail");
         throw e;
@@ -414,9 +415,7 @@ public abstract class AbstractTestDLS {
 
   public void makeWAL(HRegionServer hrs, List<RegionInfo> regions, int numEdits, int editSize,
       boolean cleanShutdown) throws IOException {
-    // remove root and meta region
-    regions.remove(RegionInfoBuilder.FIRST_META_REGIONINFO);
-
+    // remove meta and system regions
     for (Iterator<RegionInfo> iter = regions.iterator(); iter.hasNext();) {
       RegionInfo regionInfo = iter.next();
       if (regionInfo.getTable().isSystemTable()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 69a7a79..ddb21f0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -430,10 +430,11 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
   }
 
   @Override
-  public GetRegionInfoResponse getRegionInfo(RpcController controller,
-      GetRegionInfoRequest request) throws ServiceException {
+  public GetRegionInfoResponse getRegionInfo(RpcController controller, GetRegionInfoRequest request)
+    throws ServiceException {
     GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
-    builder.setRegionInfo(ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO));
+    builder.setRegionInfo(ProtobufUtil.toRegionInfo(
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1).build()));
     return builder.build();
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 0f09172..3a46d62 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -177,7 +177,8 @@ public class TestMaster {
     HMaster m = cluster.getMaster();
     try {
       m.setInitialized(false); // fake it, set back later
-      RegionInfo meta = RegionInfoBuilder.FIRST_META_REGIONINFO;
+      RegionInfo meta = m.getAssignmentManager().getRegionStates()
+        .getRegionsOfTable(TableName.META_TABLE_NAME).get(0);
       m.move(meta.getEncodedNameAsBytes(), null);
       fail("Region should not be moved since master is not initialized");
     } catch (IOException ioe) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java
index 9f46ca2..cf2e99a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java
@@ -67,7 +67,7 @@ public class TestMasterQosFunction extends QosTestHelper {
   public void testRegionInTransition() throws IOException {
     // Check ReportRegionInTransition
     HBaseProtos.RegionInfo meta_ri =
-      ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO);
+      ProtobufUtil.toRegionInfo(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build());
     HBaseProtos.RegionInfo normal_ri =
       ProtobufUtil.toRegionInfo(RegionInfoBuilder.newBuilder(TableName.valueOf("test:table"))
         .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
index 29bafe3..d8d35d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
@@ -28,8 +28,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
 import org.apache.hadoop.hbase.SingleProcessHBaseCluster.MiniHBaseClusterRegionServer;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -71,13 +72,10 @@ public class TestMetaShutdownHandler {
   }
 
   /**
-   * This test will test the expire handling of a meta-carrying
-   * region server.
-   * After HBaseMiniCluster is up, we will delete the ephemeral
-   * node of the meta-carrying region server, which will trigger
-   * the expire of this region server on the master.
-   * On the other hand, we will slow down the abort process on
-   * the region server so that it is still up during the master SSH.
+   * This test will test the expire handling of a meta-carrying region server. After
+   * HBaseMiniCluster is up, we will delete the ephemeral node of the meta-carrying region server,
+   * which will trigger the expire of this region server on the master. On the other hand, we will
+   * slow down the abort process on the region server so that it is still up during the master SSH.
    * We will check that the master SSH is still successfully done.
    */
   @Test
@@ -85,27 +83,25 @@ public class TestMetaShutdownHandler {
     SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     HMaster master = cluster.getMaster();
     RegionStates regionStates = master.getAssignmentManager().getRegionStates();
-    ServerName metaServerName =
-      regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    RegionInfo firstMetaRegion = regionStates.getRegionsOfTable(TableName.META_TABLE_NAME).get(0);
+    ServerName metaServerName = regionStates.getRegionServerOfRegion(firstMetaRegion);
     if (master.getServerName().equals(metaServerName) || metaServerName == null ||
       !metaServerName.equals(cluster.getServerHoldingMeta())) {
       // Move meta off master
       metaServerName =
         cluster.getLiveRegionServerThreads().get(0).getRegionServer().getServerName();
-      master.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
+      master.move(firstMetaRegion.getEncodedNameAsBytes(),
         Bytes.toBytes(metaServerName.getServerName()));
       TEST_UTIL.waitUntilNoRegionsInTransition(60000);
-      metaServerName =
-        regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
+      metaServerName = regionStates.getRegionServerOfRegion(firstMetaRegion);
     }
     assertNotEquals("Meta is on master!", metaServerName, master.getServerName());
     HRegionServer metaRegionServer = cluster.getRegionServer(metaServerName);
 
     // Delete the ephemeral node of the meta-carrying region server.
     // This is trigger the expire of this region server on the master.
-    String rsEphemeralNodePath =
-        ZNodePaths.joinZNode(master.getZooKeeper().getZNodePaths().rsZNode,
-                metaServerName.toString());
+    String rsEphemeralNodePath = ZNodePaths.joinZNode(master.getZooKeeper().getZNodePaths().rsZNode,
+      metaServerName.toString());
     ZKUtil.deleteNode(master.getZooKeeper(), rsEphemeralNodePath);
     LOG.info("Deleted the znode for the RegionServer hosting hbase:meta; waiting on SSH");
     // Wait for SSH to finish
@@ -115,18 +111,16 @@ public class TestMetaShutdownHandler {
     TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return !serverManager.isServerOnline(priorMetaServerName)
-            && !serverManager.areDeadServersInProgress();
+        return !serverManager.isServerOnline(priorMetaServerName) &&
+          !serverManager.areDeadServersInProgress();
       }
     });
     LOG.info("Past wait on RIT");
     TEST_UTIL.waitUntilNoRegionsInTransition(60000);
     // Now, make sure meta is assigned
-    assertTrue("Meta should be assigned",
-      regionStates.isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO));
+    assertTrue("Meta should be assigned", regionStates.isRegionOnline(firstMetaRegion));
     // Now, make sure meta is registered in zk
-    ServerName newMetaServerName =
-      regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    ServerName newMetaServerName = regionStates.getRegionServerOfRegion(firstMetaRegion);
     assertNotEquals("Meta should be assigned on a different server", newMetaServerName,
       metaServerName);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java
index cd13905..7d66dcf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
 import java.io.IOException;
+import java.util.HashSet;
 import java.util.Set;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.ServerName;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -115,16 +117,16 @@ public final class AssignmentTestingUtil {
 
   public static boolean isServerHoldingMeta(final HBaseTestingUtil util,
       final ServerName serverName) throws Exception {
-    for (RegionInfo hri: getMetaRegions(util)) {
-      if (serverName.equals(getServerHoldingRegion(util, hri))) {
-        return true;
-      }
+    HRegionServer server = util.getMiniHBaseCluster().getRegionServer(serverName);
+    if (server == null) {
+      return false;
     }
-    return false;
+    return !server.getRegions(TableName.META_TABLE_NAME).isEmpty();
   }
 
   public static Set<RegionInfo> getMetaRegions(final HBaseTestingUtil util) {
-    return getMaster(util).getAssignmentManager().getMetaRegionSet();
+    return new HashSet<>(getMaster(util).getAssignmentManager().getRegionStates()
+      .getTableRegionsInfo(TableName.META_TABLE_NAME));
   }
 
   private static HMaster getMaster(final HBaseTestingUtil util) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index be3bb24..259cf85 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
@@ -238,10 +237,7 @@ public class TestAssignmentManager extends TestAssignmentManagerBase {
     am = master.getAssignmentManager();
 
     // Assign meta
-    rsDispatcher.setMockRsExecutor(new HangThenRSRestartExecutor());
-    am.assign(RegionInfoBuilder.FIRST_META_REGIONINFO);
-    assertEquals(true, am.isMetaAssigned());
-
+    setUpMeta(new HangThenRSRestartExecutor());
     // set it back as default, see setUpMeta()
     am.wakeMetaLoadedEvent();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
index 02e8600..300fdab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -141,8 +140,6 @@ public abstract class TestAssignmentManagerBase {
 
   protected void setupConfiguration(Configuration conf) throws Exception {
     CommonFSUtils.setRootDir(conf, util.getDataTestDir());
-    conf.setBoolean(WALProcedureStore.USE_HSYNC_CONF_KEY, false);
-    conf.setInt(WALProcedureStore.SYNC_WAIT_MSEC_CONF_KEY, 10);
     conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, PROC_NTHREADS);
     conf.setInt(RSProcedureDispatcher.RS_RPC_STARTUP_WAIT_TIME_CONF_KEY, 1000);
     conf.setInt(AssignmentManager.ASSIGN_MAX_ATTEMPTS, getAssignMaxAttempts());
@@ -168,12 +165,12 @@ public abstract class TestAssignmentManagerBase {
     reopenProcMetrics = am.getAssignmentManagerMetrics().getReopenProcMetrics();
     openProcMetrics = am.getAssignmentManagerMetrics().getOpenProcMetrics();
     closeProcMetrics = am.getAssignmentManagerMetrics().getCloseProcMetrics();
-    setUpMeta();
+    setUpMeta(new GoodRsExecutor());
   }
 
-  protected void setUpMeta() throws Exception {
-    rsDispatcher.setMockRsExecutor(new GoodRsExecutor());
-    am.assign(RegionInfoBuilder.FIRST_META_REGIONINFO);
+  protected final void setUpMeta(MockRSExecutor mockRsExec) throws Exception {
+    rsDispatcher.setMockRsExecutor(mockRsExec);
+    am.assign(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setRegionId(1).build());
     am.wakeMetaLoadedEvent();
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java
index 9ec5110..6c28228 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java
@@ -41,8 +41,6 @@ import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 @Category({MasterTests.class, LargeTests.class})
 public class TestAssignmentOnRSCrash {
@@ -51,8 +49,6 @@ public class TestAssignmentOnRSCrash {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestAssignmentOnRSCrash.class);
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestAssignmentOnRSCrash.class);
-
   private static final TableName TEST_TABLE = TableName.valueOf("testb");
   private static final String FAMILY_STR = "f";
   private static final byte[] FAMILY = Bytes.toBytes(FAMILY_STR);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java
index 6c5a811..c06959e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java
@@ -64,8 +64,9 @@ public class TestHbckChore extends TestAssignmentManagerBase {
 
   @Test
   public void testForMeta() {
-    byte[] metaRegionNameAsBytes = RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName();
-    String metaRegionName = RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString();
+    RegionInfo meta = am.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).get(0);
+    byte[] metaRegionNameAsBytes = meta.getRegionName();
+    String metaRegionName = meta.getRegionNameAsString();
     List<ServerName> serverNames = master.getServerManager().getOnlineServersList();
     assertEquals(NSERVERS, serverNames.size());
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java
index 614385e..d94b9b9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java
@@ -43,28 +43,33 @@ import org.junit.experimental.categories.Category;
 public class TestMetaFixerNoCluster {
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestMetaFixerNoCluster.class);
-  private static byte[] A = Bytes.toBytes("a");
-  private static byte[] B = Bytes.toBytes("b");
-  private static byte[] C = Bytes.toBytes("c");
-  private static byte[] D = Bytes.toBytes("d");
-  private static RegionInfo ALL = RegionInfoBuilder.FIRST_META_REGIONINFO;
-  private static RegionInfo _ARI =
-    RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(A).build();
-  private static RegionInfo _BRI =
-    RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(B).build();
-  private static RegionInfo ABRI =
-    RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(B).build();
-  private static RegionInfo ACRI = org.apache.hadoop.hbase.client.RegionInfoBuilder
-    .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(C).build();
-  private static RegionInfo CDRI = org.apache.hadoop.hbase.client.RegionInfoBuilder
-    .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).setEndKey(D).build();
-  private static RegionInfo ADRI = org.apache.hadoop.hbase.client.RegionInfoBuilder
-    .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(D).build();
-  private static RegionInfo D_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder
-    .newBuilder(TableName.META_TABLE_NAME).setStartKey(D).build();
-  private static RegionInfo C_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder
-    .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).build();
+      HBaseClassTestRule.forClass(TestMetaFixerNoCluster.class);
+  private static byte [] A = Bytes.toBytes("a");
+  private static byte [] B = Bytes.toBytes("b");
+  private static byte [] C = Bytes.toBytes("c");
+  private static byte [] D = Bytes.toBytes("d");
+  private static RegionInfo ALL = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+  private static RegionInfo _ARI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+      setEndKey(A).build();
+  private static RegionInfo _BRI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setEndKey(B).build();
+  private static RegionInfo ABRI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(A).setEndKey(B).build();
+  private static RegionInfo ACRI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(A).setEndKey(C).build();
+  private static RegionInfo CDRI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(C).setEndKey(D).build();
+  private static RegionInfo ADRI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(A).setEndKey(D).build();
+  private static RegionInfo D_RI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(D).build();
+  private static RegionInfo C_RI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(C).build();
 
   @Test
   public void testGetRegionInfoWithLargestEndKey() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java
index f8d59de..98acb3c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -64,8 +64,8 @@ public class TestCleanupMetaWAL {
     TEST_UTIL.createTable(TableName.valueOf("test"), "cf");
     HRegionServer serverWithMeta = TEST_UTIL.getMiniHBaseCluster()
         .getRegionServer(TEST_UTIL.getMiniHBaseCluster().getServerWithMeta());
-    TEST_UTIL.getAdmin()
-        .move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
+    RegionInfo metaInfo = TEST_UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).get(0);
+    TEST_UTIL.getAdmin().move(metaInfo.getEncodedNameAsBytes());
     LOG.info("KILL");
     TEST_UTIL.getMiniHBaseCluster().killRegionServer(serverWithMeta.getServerName());
     LOG.info("WAIT");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 74bf075..13ac5c1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -954,9 +954,9 @@ public class TestDefaultMemStore {
     WALFactory wFactory = new WALFactory(conf, "1234");
     TableDescriptors tds = new FSTableDescriptors(conf);
     FSTableDescriptors.tryUpdateMetaTableDescriptor(conf);
-    HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir,
-        conf, tds.get(TableName.META_TABLE_NAME),
-        wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
+    RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+    HRegion meta = HRegion.createHRegion(metaRegionInfo, testDir, conf,
+      tds.get(TableName.META_TABLE_NAME), wFactory.getWAL(metaRegionInfo));
     // parameterized tests add [#] suffix get rid of [ and ].
     TableDescriptor desc = TableDescriptorBuilder
         .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")))
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java
index e832c47..49d9d66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
@@ -62,11 +63,11 @@ public class TestDefaultStoreEngine {
     Configuration conf = HBaseConfiguration.create();
     conf.set(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, DummyCompactor.class.getName());
     conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
-        DummyCompactionPolicy.class.getName());
-    conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
-        DummyStoreFlusher.class.getName());
+      DummyCompactionPolicy.class.getName());
+    conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, DummyStoreFlusher.class.getName());
     HStore mockStore = Mockito.mock(HStore.class);
-    Mockito.when(mockStore.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    Mockito.when(mockStore.getRegionInfo())
+      .thenReturn(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build());
     StoreEngine<?, ?, ?, ?> se = StoreEngine.create(mockStore, conf, CellComparatorImpl.COMPARATOR);
     Assert.assertTrue(se instanceof DefaultStoreEngine);
     Assert.assertTrue(se.getCompactionPolicy() instanceof DummyCompactionPolicy);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index a435b9d..350876e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -94,8 +94,8 @@ public class TestGetClosestAtOrBefore {
     FSTableDescriptors.tryUpdateMetaTableDescriptor(UTIL.getConfiguration());
     TableDescriptor td = tds.get(TableName.META_TABLE_NAME);
     td = TableDescriptorBuilder.newBuilder(td).setMemStoreFlushSize(64 * 1024 * 1024).build();
-    HRegion mr = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.FIRST_META_REGIONINFO,
-      rootdir, conf, td);
+    HRegion mr = HBaseTestingUtil.createRegionAndWAL(
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(), rootdir, conf, td);
     try {
       // Write rows for three tables 'A', 'B', and 'C'.
       for (char c = 'A'; c < 'D'; c++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java
index 6d202bd..17a5b4b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java
@@ -58,6 +58,9 @@ public class TestPriorityRpc {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestPriorityRpc.class);
 
+  private static final RegionInfo FIRST_META_REGIONINFO =
+    RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+
   private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
 
   private static HRegionServer RS = null;
@@ -87,8 +90,7 @@ public class TestPriorityRpc {
     GetRequest.Builder getRequestBuilder = GetRequest.newBuilder();
     RegionSpecifier.Builder regionSpecifierBuilder = RegionSpecifier.newBuilder();
     regionSpecifierBuilder.setType(RegionSpecifierType.REGION_NAME);
-    ByteString name = UnsafeByteOperations.unsafeWrap(
-        RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
+    ByteString name = UnsafeByteOperations.unsafeWrap(FIRST_META_REGIONINFO.getRegionName());
     regionSpecifierBuilder.setValue(name);
     RegionSpecifier regionSpecifier = regionSpecifierBuilder.build();
     getRequestBuilder.setRegion(regionSpecifier);
@@ -104,8 +106,7 @@ public class TestPriorityRpc {
     RegionInfo mockRegionInfo = Mockito.mock(RegionInfo.class);
     Mockito.when(mockRpc.getRegion(Mockito.any())).thenReturn(mockRegion);
     Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
-    Mockito.when(mockRegionInfo.getTable())
-        .thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable());
+    Mockito.when(mockRegionInfo.getTable()).thenReturn(FIRST_META_REGIONINFO.getTable());
     // Presume type.
     ((AnnotationReadingPriorityFunction)PRIORITY).setRegionServer(mockRS);
     assertEquals(
@@ -159,8 +160,7 @@ public class TestPriorityRpc {
     Mockito.when(mockRegionScanner.getRegionInfo()).thenReturn(mockRegionInfo);
     Mockito.when(mockRpc.getRegion((RegionSpecifier)Mockito.any())).thenReturn(mockRegion);
     Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
-    Mockito.when(mockRegionInfo.getTable())
-        .thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable());
+    Mockito.when(mockRegionInfo.getTable()).thenReturn(FIRST_META_REGIONINFO.getTable());
 
     // Presume type.
     ((AnnotationReadingPriorityFunction)PRIORITY).setRegionServer(mockRS);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java
index 9a2456d..db47527 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java
@@ -22,6 +22,7 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Optional;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.ipc.RpcCall;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -63,7 +64,8 @@ public class TestRSRpcServices {
     String userNameTest = RSRpcServices.getUserName();
     assertEquals("test", userNameTest);
     HRegion region = Mockito.mock(HRegion.class);
-    Mockito.when(region.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    Mockito.when(region.getRegionInfo())
+      .thenReturn(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build());
     RSRpcServices.RegionScannerHolder rsh = new RSRpcServices.RegionScannerHolder(null, region,
       null, null, false, false, clientIpAndPort,
       userNameTest);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
index c0e72cb..f37352f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
@@ -68,7 +68,7 @@ public class TestReadAndWriteRegionInfoFile {
 
   @Test
   public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedException {
-    RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
+    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
     // Create a region. That'll write the .regioninfo file.
     FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR);
     FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
index 56a8ea0..6242eae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
@@ -57,70 +57,58 @@ import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
-@Category({RegionServerTests.class, SmallTests.class})
+@Category({ RegionServerTests.class, SmallTests.class })
 public class TestRegionInfo {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestRegionInfo.class);
+    HBaseClassTestRule.forClass(TestRegionInfo.class);
 
   @Rule
   public TestName name = new TestName();
 
   @Test
   public void testIsStart() {
-    assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst());
-    org.apache.hadoop.hbase.client.RegionInfo ri =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setStartKey(Bytes.toBytes("not_start")).build();
+    assertTrue(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().isFirst());
+    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+      .setStartKey(Bytes.toBytes("not_start")).build();
     assertFalse(ri.isFirst());
   }
 
   @Test
   public void testIsEnd() {
-    assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst());
-    org.apache.hadoop.hbase.client.RegionInfo ri =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setEndKey(Bytes.toBytes("not_end")).build();
+    assertTrue(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().isLast());
+    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+      .setEndKey(Bytes.toBytes("not_end")).build();
     assertFalse(ri.isLast());
   }
 
   @Test
   public void testIsNext() {
-    byte [] bytes = Bytes.toBytes("row");
-    org.apache.hadoop.hbase.client.RegionInfo ri =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setEndKey(bytes).build();
-    org.apache.hadoop.hbase.client.RegionInfo ri2 =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setStartKey(bytes).build();
-    assertFalse(ri.isNext(RegionInfoBuilder.FIRST_META_REGIONINFO));
+    byte[] bytes = Bytes.toBytes("row");
+    RegionInfo ri =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(bytes).build();
+    RegionInfo ri2 =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(bytes).build();
+    assertFalse(ri.isNext(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build()));
     assertTrue(ri.isNext(ri2));
   }
 
   @Test
   public void testIsOverlap() {
-    byte [] a = Bytes.toBytes("a");
-    byte [] b = Bytes.toBytes("b");
-    byte [] c = Bytes.toBytes("c");
-    byte [] d = Bytes.toBytes("d");
-    org.apache.hadoop.hbase.client.RegionInfo all =
-        RegionInfoBuilder.FIRST_META_REGIONINFO;
-    org.apache.hadoop.hbase.client.RegionInfo ari =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setEndKey(a).build();
-    org.apache.hadoop.hbase.client.RegionInfo abri =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setStartKey(a).setEndKey(b).build();
-    org.apache.hadoop.hbase.client.RegionInfo adri =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setStartKey(a).setEndKey(d).build();
-    org.apache.hadoop.hbase.client.RegionInfo cdri =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setStartKey(c).setEndKey(d).build();
-    org.apache.hadoop.hbase.client.RegionInfo dri =
-        org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-            setStartKey(d).build();
+    byte[] a = Bytes.toBytes("a");
+    byte[] b = Bytes.toBytes("b");
+    byte[] c = Bytes.toBytes("c");
+    byte[] d = Bytes.toBytes("d");
+    RegionInfo all = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+    RegionInfo ari = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build();
+    RegionInfo abri =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(a).setEndKey(b).build();
+    RegionInfo adri =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(a).setEndKey(d).build();
+    RegionInfo cdri =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(c).setEndKey(d).build();
+    RegionInfo dri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(d).build();
     assertTrue(all.isOverlap(all));
     assertTrue(all.isOverlap(abri));
     assertFalse(abri.isOverlap(cdri));
@@ -146,21 +134,14 @@ public class TestRegionInfo {
     byte[] d = Bytes.toBytes("d");
     byte[] e = Bytes.toBytes("e");
     byte[] f = Bytes.toBytes("f");
-    org.apache.hadoop.hbase.client.RegionInfo ari =
-      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-        setEndKey(a).build();
-    org.apache.hadoop.hbase.client.RegionInfo abri =
-      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-        setStartKey(a).setEndKey(b).build();
-    org.apache.hadoop.hbase.client.RegionInfo eri =
-      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-        setEndKey(e).build();
-    org.apache.hadoop.hbase.client.RegionInfo cdri =
-      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-        setStartKey(c).setEndKey(d).build();
-    org.apache.hadoop.hbase.client.RegionInfo efri =
-      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
-        setStartKey(e).setEndKey(f).build();
+    RegionInfo ari = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build();
+    RegionInfo abri =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(a).setEndKey(b).build();
+    RegionInfo eri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(e).build();
+    RegionInfo cdri =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(c).setEndKey(d).build();
+    RegionInfo efri =
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(e).setEndKey(f).build();
     assertFalse(ari.isOverlap(abri));
     assertTrue(abri.isOverlap(eri));
     assertFalse(cdri.isOverlap(efri));
@@ -169,8 +150,9 @@ public class TestRegionInfo {
 
   @Test
   public void testPb() throws DeserializationException {
-    RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
-    byte [] bytes = RegionInfo.toByteArray(hri);
+    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf("test"))
+      .setStartKey(Bytes.toBytes("start")).build();
+    byte[] bytes = RegionInfo.toByteArray(hri);
     RegionInfo pbhri = RegionInfo.parseFrom(bytes);
     assertTrue(hri.equals(pbhri));
   }
@@ -178,28 +160,26 @@ public class TestRegionInfo {
   @Test
   public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
     HBaseTestingUtil htu = new HBaseTestingUtil();
-    RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
+    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
     Path basedir = htu.getDataTestDir();
-    // Create a region.  That'll write the .regioninfo file.
+    // Create a region. That'll write the .regioninfo file.
     FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
     FSTableDescriptors.tryUpdateMetaTableDescriptor(htu.getConfiguration());
     HRegion r = HBaseTestingUtil.createRegionAndWAL(hri, basedir, htu.getConfiguration(),
-        fsTableDescriptors.get(TableName.META_TABLE_NAME));
+      fsTableDescriptors.get(TableName.META_TABLE_NAME));
     // Get modtime on the file.
     long modtime = getModTime(r);
     HBaseTestingUtil.closeRegionAndWAL(r);
     Thread.sleep(1001);
-    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
-        null, htu.getConfiguration());
+    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null,
+      htu.getConfiguration());
     // Ensure the file is not written for a second time.
     long modtime2 = getModTime(r);
     assertEquals(modtime, modtime2);
     // Now load the file.
-    org.apache.hadoop.hbase.client.RegionInfo deserializedHri =
-      HRegionFileSystem.loadRegionInfoFileContent(
-        r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
-    assertEquals(0,
-      org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(hri, deserializedHri));
+    RegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent(
+      r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
+    assertEquals(0, RegionInfo.COMPARATOR.compare(hri, deserializedHri));
     HBaseTestingUtil.closeRegionAndWAL(r);
   }
 
@@ -219,19 +199,16 @@ public class TestRegionInfo {
     String id = "id";
 
     // old format region name
-    byte [] name = RegionInfo.createRegionName(tn, sk, id, false);
+    byte[] name = RegionInfo.createRegionName(tn, sk, id, false);
     String nameStr = Bytes.toString(name);
     assertEquals(tableName + "," + startKey + "," + id, nameStr);
 
-
     // new format region name.
     String md5HashInHex = MD5Hash.getMD5AsHex(name);
     assertEquals(RegionInfo.MD5_HEX_LENGTH, md5HashInHex.length());
     name = RegionInfo.createRegionName(tn, sk, id, true);
     nameStr = Bytes.toString(name);
-    assertEquals(tableName + "," + startKey + ","
-                 + id + "." + md5HashInHex + ".",
-                 nameStr);
+    assertEquals(tableName + "," + startKey + "," + id + "." + md5HashInHex + ".", nameStr);
   }
 
   @Test
@@ -309,7 +286,8 @@ public class TestRegionInfo {
 
   @Test
   public void testMetaTables() {
-    assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isMetaRegion());
+    assertTrue(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().isMetaRegion());
+    assertFalse(RegionInfoBuilder.newBuilder(TableName.valueOf("test")).build().isMetaRegion());
   }
 
   @SuppressWarnings("SelfComparison")
@@ -327,7 +305,7 @@ public class TestRegionInfo {
     RegionInfo b = RegionInfoBuilder.newBuilder(TableName.valueOf("b")).build();
     assertNotEquals(0, a.compareTo(b));
     TableName t = TableName.valueOf("t");
-    byte [] midway = Bytes.toBytes("midway");
+    byte[] midway = Bytes.toBytes("midway");
     a = RegionInfoBuilder.newBuilder(t).setEndKey(midway).build();
     b = RegionInfoBuilder.newBuilder(t).setStartKey(midway).build();
     assertTrue(a.compareTo(b) < 0);
@@ -363,21 +341,22 @@ public class TestRegionInfo {
     // assert with only the region name without encoding
 
     // primary, replicaId = 0
-    byte [] name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0, false);
+    byte[] name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0, false);
     String nameStr = Bytes.toString(name);
     assertEquals(tableName + "," + startKey + "," + id, nameStr);
 
     // replicaId = 1
     name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 1, false);
     nameStr = Bytes.toString(name);
-    assertEquals(tableName + "," + startKey + "," + id + "_" +
-      String.format(RegionInfo.REPLICA_ID_FORMAT, 1), nameStr);
+    assertEquals(
+      tableName + "," + startKey + "," + id + "_" + String.format(RegionInfo.REPLICA_ID_FORMAT, 1),
+      nameStr);
 
     // replicaId = max
     name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0xFFFF, false);
     nameStr = Bytes.toString(name);
     assertEquals(tableName + "," + startKey + "," + id + "_" +
-        String.format(RegionInfo.REPLICA_ID_FORMAT, 0xFFFF), nameStr);
+      String.format(RegionInfo.REPLICA_ID_FORMAT, 0xFFFF), nameStr);
   }
 
   @Test
@@ -391,21 +370,20 @@ public class TestRegionInfo {
     byte[] regionName = RegionInfo.createRegionName(tableName, startKey, regionId, false);
 
     byte[][] fields = RegionInfo.parseRegionName(regionName);
-    assertArrayEquals(Bytes.toString(fields[0]),tableName.getName(), fields[0]);
-    assertArrayEquals(Bytes.toString(fields[1]),startKey, fields[1]);
-    assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)),fields[2]);
+    assertArrayEquals(Bytes.toString(fields[0]), tableName.getName(), fields[0]);
+    assertArrayEquals(Bytes.toString(fields[1]), startKey, fields[1]);
+    assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)), fields[2]);
     assertEquals(3, fields.length);
 
     // test with replicaId
-    regionName = RegionInfo.createRegionName(tableName, startKey, regionId,
-      replicaId, false);
+    regionName = RegionInfo.createRegionName(tableName, startKey, regionId, replicaId, false);
 
     fields = RegionInfo.parseRegionName(regionName);
-    assertArrayEquals(Bytes.toString(fields[0]),tableName.getName(), fields[0]);
-    assertArrayEquals(Bytes.toString(fields[1]),startKey, fields[1]);
-    assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)),fields[2]);
-    assertArrayEquals(Bytes.toString(fields[3]), Bytes.toBytes(
-      String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)), fields[3]);
+    assertArrayEquals(Bytes.toString(fields[0]), tableName.getName(), fields[0]);
+    assertArrayEquals(Bytes.toString(fields[1]), startKey, fields[1]);
+    assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)), fields[2]);
+    assertArrayEquals(Bytes.toString(fields[3]),
+      Bytes.toBytes(String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)), fields[3]);
   }
 
   @Test
@@ -441,10 +419,11 @@ public class TestRegionInfo {
 
     assertEquals(expectedHri, convertedHri);
   }
+
   @Test
   public void testRegionDetailsForDisplay() throws IOException {
-    byte[] startKey = new byte[] {0x01, 0x01, 0x02, 0x03};
-    byte[] endKey = new byte[] {0x01, 0x01, 0x02, 0x04};
+    byte[] startKey = new byte[] { 0x01, 0x01, 0x02, 0x03 };
+    byte[] endKey = new byte[] { 0x01, 0x01, 0x02, 0x04 };
     Configuration conf = new Configuration();
     conf.setBoolean("hbase.display.keys", false);
     RegionInfo h = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
@@ -507,4 +486,3 @@ public class TestRegionInfo {
     }
   }
 }
-
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java
index 48729fa..6d767aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java
@@ -51,7 +51,7 @@ public class TestRegionInfoStaticInitialization {
     // RegionInfoBuilder.
     final Supplier<RegionInfo> retrieveUNDEFINED = () -> RegionInfo.UNDEFINED;
     final Supplier<RegionInfo> retrieveMetaRegionInfo =
-      () -> RegionInfoBuilder.FIRST_META_REGIONINFO;
+      () -> RegionInfoBuilder.UNDEFINED;
 
     // The test runs multiple threads that reference these mutually dependent symbols. In order to
     // express this bug, these threads need to access these symbols at roughly the same time, so
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
index f5330f6..dbab4e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
@@ -119,7 +120,8 @@ public class TestStripeStoreEngine {
 
   private static TestStoreEngine createEngine(Configuration conf) throws Exception {
     HStore store = mock(HStore.class);
-    when(store.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    when(store.getRegionInfo())
+      .thenReturn(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build());
     CellComparatorImpl kvComparator = mock(CellComparatorImpl.class);
     return (TestStoreEngine) StoreEngine.create(store, conf, kvComparator);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
index 31b95ee..c350384 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
@@ -29,6 +29,7 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.logging.Log4jUtils;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -206,7 +207,8 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator {
     HStore s = mock(HStore.class);
     when(s.getStoreFileTtl()).thenReturn(Long.MAX_VALUE);
     when(s.getBlockingFileCount()).thenReturn(7L);
-    when(s.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    when(s.getRegionInfo())
+      .thenReturn(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build());
     return s;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
index 5eb94ac..6636770 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -100,6 +101,8 @@ public class TestStripeCompactionPolicy {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestStripeCompactionPolicy.class);
 
+  private static final RegionInfo FIRST_META_REGIONINFO =
+    RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
   private static final byte[] KEY_A = Bytes.toBytes("aaa");
   private static final byte[] KEY_B = Bytes.toBytes("bbb");
   private static final byte[] KEY_C = Bytes.toBytes("ccc");
@@ -169,7 +172,7 @@ public class TestStripeCompactionPolicy {
     conf.setInt(StripeStoreConfig.MAX_FILES_KEY, 4);
     conf.setLong(StripeStoreConfig.SIZE_TO_SPLIT_KEY, 1000); // make sure the are no splits
     StoreConfigInformation sci = mock(StoreConfigInformation.class);
-    when(sci.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    when(sci.getRegionInfo()).thenReturn(FIRST_META_REGIONINFO);
     StripeStoreConfig ssc = new StripeStoreConfig(conf, sci);
     StripeCompactionPolicy policy = new StripeCompactionPolicy(conf, sci, ssc) {
       @Override
@@ -515,7 +518,7 @@ public class TestStripeCompactionPolicy {
     conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, initialCount);
     StoreConfigInformation sci = mock(StoreConfigInformation.class);
     when(sci.getStoreFileTtl()).thenReturn(hasTtl ? defaultTtl : Long.MAX_VALUE);
-    when(sci.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    when(sci.getRegionInfo()).thenReturn(FIRST_META_REGIONINFO);
     StripeStoreConfig ssc = new StripeStoreConfig(conf, sci);
     return new StripeCompactionPolicy(conf, sci, ssc);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
index c220639..0131944 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -159,9 +158,8 @@ public class TestLogRollingNoCluster {
       this.log.info(getName() +" started");
       final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
       try {
-        TableDescriptors tds = new FSTableDescriptors(TEST_UTIL.getConfiguration());
         FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration());
-        TableDescriptor htd = tds.get(TableName.META_TABLE_NAME);
+        RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
         for (int i = 0; i < this.count; i++) {
           long now = EnvironmentEdgeManager.currentTime();
           // Roll every ten edits
@@ -171,7 +169,6 @@ public class TestLogRollingNoCluster {
           WALEdit edit = new WALEdit();
           byte[] bytes = Bytes.toBytes(i);
           edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
-          RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
           NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
           for(byte[] fam: this.metaTableDescriptor.getColumnFamilyNames()) {
             scopes.put(fam, 0);
@@ -199,8 +196,4 @@ public class TestLogRollingNoCluster {
       }
     }
   }
-
-  //@org.junit.Rule
-  //public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
-  //  new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
index a0d5cc9..26ed003 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
@@ -67,9 +67,9 @@ public class TestReplicationWALEntryFilters {
     SystemTableWALEntryFilter filter = new SystemTableWALEntryFilter();
 
     // meta
-    WALKeyImpl key1 =
-      new WALKeyImpl(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
-        TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime());
+    WALKeyImpl key1 = new WALKeyImpl(
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build().getEncodedNameAsBytes(),
+      TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime());
     Entry metaEntry = new Entry(key1, null);
 
     assertNull(filter.filter(metaEntry));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 16d4456..26d69b5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -28,7 +28,6 @@ import java.io.File;
 import java.io.IOException;
 import java.util.List;
 import java.util.Random;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -42,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -232,8 +232,8 @@ public class TestFSUtils {
     Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
     assertTrue(CommonFSUtils.isExists(fs, versionFile));
     assertTrue(CommonFSUtils.delete(fs, versionFile, true));
-    Path metaRegionDir =
-        FSUtils.getRegionDirFromRootDir(rootdir, RegionInfoBuilder.FIRST_META_REGIONINFO);
+    Path metaRegionDir = FSUtils.getRegionDirFromRootDir(rootdir,
+      RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build());
     FsPermission defaultPerms = CommonFSUtils.getFilePermissions(fs, this.conf,
         HConstants.DATA_FILE_UMASK_KEY);
     CommonFSUtils.create(fs, metaRegionDir, defaultPerms, false);
@@ -297,7 +297,7 @@ public class TestFSUtils {
     assertEquals(new FsPermission("700"), filePerm);
 
     // then that the correct file is created
-    Path p = new Path("target" + File.separator + htu.getRandomUUID().toString());
+    Path p = new Path("target" + File.separator + HBaseTestingUtil.getRandomUUID().toString());
     try {
       FSDataOutputStream out = FSUtils.create(conf, fs, p, filePerm, null);
       out.close();
@@ -316,7 +316,7 @@ public class TestFSUtils {
     conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
     FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
     // then that the correct file is created
-    String file = htu.getRandomUUID().toString();
+    String file = HBaseTestingUtil.getRandomUUID().toString();
     Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
     Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
     try {
@@ -357,7 +357,7 @@ public class TestFSUtils {
     FileSystem fs = FileSystem.get(conf);
     Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
 
-    String file = htu.getRandomUUID().toString();
+    String file = HBaseTestingUtil.getRandomUUID().toString();
     Path p = new Path(testDir, file);
 
     FSDataOutputStream out = fs.create(p);
@@ -371,7 +371,7 @@ public class TestFSUtils {
     mockEnv.setValue(expect);
     EnvironmentEdgeManager.injectEdge(mockEnv);
     try {
-      String dstFile = htu.getRandomUUID().toString();
+      String dstFile = HBaseTestingUtil.getRandomUUID().toString();
       Path dst = new Path(testDir , dstFile);
 
       assertTrue(CommonFSUtils.renameAndSetModifyTime(fs, p, dst));
@@ -453,7 +453,7 @@ public class TestFSUtils {
           conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
       CommonFSUtils.setStoragePolicy(fs, testDir, storagePolicy);
 
-      String file =htu.getRandomUUID().toString();
+      String file = HBaseTestingUtil.getRandomUUID().toString();
       Path p = new Path(testDir, file);
       WriteDataToHDFS(fs, p, 4096);
       HFileSystem hfs = new HFileSystem(fs);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 8bfd4dd..50a28ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.Method;
@@ -91,10 +92,12 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
 import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 
@@ -135,6 +138,8 @@ public class TestWALSplit {
   private static String ROBBER;
   private static String ZOMBIE;
   private static String [] GROUP = new String [] {"supergroup"};
+  private static RegionInfo FIRST_META_REGIONINFO =
+    RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
 
   static enum Corruptions {
     INSERT_GARBAGE_ON_FIRST_LINE,
@@ -371,7 +376,7 @@ public class TestWALSplit {
   public void testRecoveredEditsPathForMeta() throws IOException {
     Path p = createRecoveredEditsPathForRegion();
     String parentOfParent = p.getParent().getParent().getName();
-    assertEquals(parentOfParent, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
+    assertEquals(parentOfParent, FIRST_META_REGIONINFO.getEncodedName());
   }
 
   /**
@@ -383,18 +388,18 @@ public class TestWALSplit {
     Path p = createRecoveredEditsPathForRegion();
     Path tdir = CommonFSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME);
     Path regiondir = new Path(tdir,
-      RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
+      FIRST_META_REGIONINFO.getEncodedName());
     fs.mkdirs(regiondir);
     Path parent = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
     assertEquals(HConstants.RECOVERED_EDITS_DIR, parent.getName());
     fs.createNewFile(parent); // create a recovered.edits file
     String parentOfParent = p.getParent().getParent().getName();
-    assertEquals(parentOfParent, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
+    assertEquals(parentOfParent, FIRST_META_REGIONINFO.getEncodedName());
     WALFactory.createRecoveredEditsWriter(fs, p, conf).close();
   }
 
   private Path createRecoveredEditsPathForRegion() throws IOException {
-    byte[] encoded = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
+    byte[] encoded = FIRST_META_REGIONINFO.getEncodedNameAsBytes();
     long now = EnvironmentEdgeManager.currentTime();
     Entry entry = new Entry(
         new WALKeyImpl(encoded, TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
@@ -408,10 +413,10 @@ public class TestWALSplit {
   @Test
   public void testHasRecoveredEdits() throws IOException {
     Path p = createRecoveredEditsPathForRegion();
-    assertFalse(WALSplitUtil.hasRecoveredEdits(conf, RegionInfoBuilder.FIRST_META_REGIONINFO));
+    assertFalse(WALSplitUtil.hasRecoveredEdits(conf, FIRST_META_REGIONINFO));
     String renamedEdit = p.getName().split("-")[0];
     fs.createNewFile(new Path(p.getParent(), renamedEdit));
-    assertTrue(WALSplitUtil.hasRecoveredEdits(conf, RegionInfoBuilder.FIRST_META_REGIONINFO));
+    assertTrue(WALSplitUtil.hasRecoveredEdits(conf, FIRST_META_REGIONINFO));
   }
 
   private void useDifferentDFSClient() throws IOException {
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
index a98bd6e..22c7d77 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -21,7 +21,6 @@ import java.io.Closeable;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -357,10 +356,7 @@ public abstract class HBaseCluster implements Closeable, Configurable {
   /**
    * Get the ServerName of region server serving the first hbase:meta region
    */
-  public ServerName getServerHoldingMeta() throws IOException {
-    return getServerHoldingRegion(TableName.META_TABLE_NAME,
-      RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
-  }
+  public abstract ServerName getServerHoldingMeta() throws IOException;
 
   /**
    * Get the ServerName of region server serving the specified region
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index f8dce25..7a9b397 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -26,7 +26,8 @@ import java.util.List;
 import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
@@ -832,7 +833,20 @@ public class MiniHBaseCluster extends HBaseCluster {
    * of HRS carrying regionName. Returns -1 if none found.
    */
   public int getServerWithMeta() {
-    return getServerWith(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
+    int index = 0;
+    for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
+      HRegionServer hrs = rst.getRegionServer();
+      if (!hrs.isStopped()) {
+        for (Region region : hrs.getRegions(TableName.META_TABLE_NAME)) {
+          RegionInfo ri = region.getRegionInfo();
+          if (ri.isFirst() && RegionReplicaUtil.isDefaultReplica(ri)) {
+            return index;
+          }
+        }
+      }
+      index++;
+    }
+    return -1;
   }
 
   /**
@@ -857,8 +871,24 @@ public class MiniHBaseCluster extends HBaseCluster {
   }
 
   @Override
+  public ServerName getServerHoldingMeta() throws IOException {
+    for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
+      HRegionServer hrs = rst.getRegionServer();
+      if (!hrs.isStopped()) {
+        for (Region region : hrs.getRegions(TableName.META_TABLE_NAME)) {
+          RegionInfo ri = region.getRegionInfo();
+          if (ri.isFirst() && RegionReplicaUtil.isDefaultReplica(ri)) {
+            return hrs.getServerName();
+          }
+        }
+      }
+    }
+    return null;
+  }
+
+  @Override
   public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName)
-  throws IOException {
+    throws IOException {
     // Assume there is only one master thread which is the active master.
     // If there are multiple master threads, the backup master threads
     // should hold some regions. Please refer to #countServedRegions

[hbase] 08/09: HBASE-24929 Introduce a special CellComparator for master local region (#2378)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 2cc9cb9e83e56479a3b6ddf4a4fe560805dfbaa7
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Sep 15 21:11:55 2020 +0800

    HBASE-24929 Introduce a special CellComparator for master local region (#2378)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../org/apache/hadoop/hbase/PrivateCellUtil.java   | 13 +++
 .../hadoop/hbase/master/region/MasterRegion.java   |  4 +-
 .../master/region/MasterRegionCellComparator.java  | 92 ++++++++++++++++++++++
 .../hbase/master/region/MasterRegionFactory.java   |  2 +-
 .../hbase/master/region/MasterRegionParams.java    | 11 ---
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 25 ++++--
 6 files changed, 124 insertions(+), 23 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
index 810eb24..fd65980 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
@@ -722,6 +722,19 @@ public final class PrivateCellUtil {
         length);
   }
 
+  public static boolean rowsStartWith(Cell left, byte[] startsWith) {
+    if (left.getRowLength() < startsWith.length) {
+      return false;
+    }
+    if (left instanceof ByteBufferExtendedCell) {
+      return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getRowByteBuffer(),
+        ((ByteBufferExtendedCell) left).getRowPosition(), startsWith.length, startsWith, 0,
+        startsWith.length);
+    }
+    return Bytes.equals(left.getRowArray(), left.getRowOffset(), startsWith.length, startsWith, 0,
+      startsWith.length);
+  }
+
   public static boolean matchingFamily(final Cell left, final byte[] buf, final int offset,
       final int length) {
     if (left instanceof ByteBufferExtendedCell) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
index c2188b4..c1d9f47 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
@@ -284,9 +284,7 @@ public final class MasterRegion {
     if (params.useHsync() != null) {
       conf.setBoolean(HRegion.WAL_HSYNC_CONF_KEY, params.useHsync());
     }
-    if (params.useMetaCellComparator() != null) {
-      conf.setBoolean(HRegion.USE_META_CELL_COMPARATOR, params.useMetaCellComparator());
-    }
+    conf.setBoolean(HRegion.USE_MASTER_REGION_CELL_COMPARATOR, true);
     conf.setInt(AbstractFSWAL.RING_BUFFER_SLOT_COUNT,
       IntMath.ceilingPowerOfTwo(params.ringBufferSlotCount()));
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionCellComparator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionCellComparator.java
new file mode 100644
index 0000000..3960c41
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionCellComparator.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.region;
+
+import java.util.Comparator;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.MetaCellComparator;
+import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Cell comparator implementation for master local region.
+ * <p/>
+ * In general, for catalog family, we need to use {@link MetaCellComparator} while for other
+ * families, we need to use {@link CellComparatorImpl}.
+ * <p/>
+ * The trick here is to check the row key format: if it starts with 'hbase:meta', we will use
+ * {@link MetaCellComparator}, otherwise we will use {@link CellComparatorImpl}.
+ */
+@InterfaceAudience.Private
+public class MasterRegionCellComparator extends CellComparatorImpl {
+
+  /**
+   * A {@link MasterRegionCellComparator} for {@link MasterRegion} {@link Cell}s.
+   */
+  public static final MasterRegionCellComparator MASTER_REGION_COMPARATOR =
+    new MasterRegionCellComparator();
+
+  private static final byte[] CATALOG_ROW_PREFIX = TableName.META_TABLE_NAME.getName();
+
+  private boolean isCatalogRow(Cell c) {
+    return PrivateCellUtil.rowsStartWith(c, CATALOG_ROW_PREFIX);
+  }
+
+  private boolean isCatalogRow(byte[] row, int off, int len) {
+    if (len < CATALOG_ROW_PREFIX.length) {
+      return false;
+    }
+    return Bytes.equals(row, off, CATALOG_ROW_PREFIX.length, CATALOG_ROW_PREFIX, 0,
+      CATALOG_ROW_PREFIX.length);
+  }
+
+  @Override
+  public int compare(Cell a, Cell b, boolean ignoreSequenceid) {
+    if (isCatalogRow(a) || isCatalogRow(b)) {
+      return MetaCellComparator.META_COMPARATOR.compare(a, b, ignoreSequenceid);
+    } else {
+      return super.compare(a, b, ignoreSequenceid);
+    }
+  }
+
+  @Override
+  public int compareRows(Cell left, Cell right) {
+    if (isCatalogRow(left) || isCatalogRow(right)) {
+      return MetaCellComparator.META_COMPARATOR.compareRows(left, right);
+    } else {
+      return super.compareRows(left, right);
+    }
+  }
+
+  @Override
+  public int compareRows(Cell left, byte[] right, int roffset, int rlength) {
+    if (isCatalogRow(left) || isCatalogRow(right, roffset, rlength)) {
+      return MetaCellComparator.META_COMPARATOR.compareRows(left, right, roffset, rlength);
+    } else {
+      return super.compareRows(left, right, roffset, rlength);
+    }
+  }
+
+  @Override
+  public Comparator getSimpleComparator() {
+    return this;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
index cfa25f5..d70aef6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
@@ -107,7 +107,7 @@ public final class MasterRegionFactory {
     params.ringBufferSlotCount(conf.getInt(RING_BUFFER_SLOT_COUNT, DEFAULT_RING_BUFFER_SLOT_COUNT));
     long rollPeriodMs = conf.getLong(ROLL_PERIOD_MS_KEY, DEFAULT_ROLL_PERIOD_MS);
     params.rollPeriodMs(rollPeriodMs).archivedWalSuffix(ARCHIVED_WAL_SUFFIX)
-      .archivedHFileSuffix(ARCHIVED_HFILE_SUFFIX).useMetaCellComparator(true);
+      .archivedHFileSuffix(ARCHIVED_HFILE_SUFFIX);
     return MasterRegion.create(params);
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
index f2a03a4..1bfc84d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
@@ -53,8 +53,6 @@ public class MasterRegionParams {
 
   private String archivedHFileSuffix;
 
-  private Boolean useMetaCellComparator;
-
   public MasterRegionParams server(Server server) {
     this.server = server;
     return this;
@@ -120,11 +118,6 @@ public class MasterRegionParams {
     return this;
   }
 
-  public MasterRegionParams useMetaCellComparator(boolean useMetaCellComparator) {
-    this.useMetaCellComparator = useMetaCellComparator;
-    return this;
-  }
-
   public Server server() {
     return server;
   }
@@ -176,8 +169,4 @@ public class MasterRegionParams {
   public String archivedHFileSuffix() {
     return archivedHFileSuffix;
   }
-
-  public Boolean useMetaCellComparator() {
-    return useMetaCellComparator;
-  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ef1e7ae..61f5833 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -134,6 +134,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RpcCall;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.master.region.MasterRegionCellComparator;
 import org.apache.hadoop.hbase.mob.MobFileCache;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -251,12 +252,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     "hbase.hregion.special.recovered.edits.dir";
 
   /**
-   * Whether to use {@link MetaCellComparator} even if we are not meta region. Used when creating
-   * master local region.
+   * Whether to use {@link MasterRegionCellComparator}.
    */
-  public static final String USE_META_CELL_COMPARATOR = "hbase.region.use.meta.cell.comparator";
-
-  public static final boolean DEFAULT_USE_META_CELL_COMPARATOR = false;
+  public static final String USE_MASTER_REGION_CELL_COMPARATOR =
+    "hbase.region.use.master.region.cell.comparator";
 
   final AtomicBoolean closed = new AtomicBoolean(false);
 
@@ -736,6 +735,18 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       wal, confParam, htd, rsServices);
   }
 
+  private CellComparator getCellComparator(Configuration conf, TableDescriptor htd) {
+    boolean useMasterRegionCellComparator =
+      conf.getBoolean(USE_MASTER_REGION_CELL_COMPARATOR, false);
+    if (useMasterRegionCellComparator) {
+      return MasterRegionCellComparator.MASTER_REGION_COMPARATOR;
+    } else if (htd.isMetaTable()) {
+      return MetaCellComparator.META_COMPARATOR;
+    } else {
+      return CellComparatorImpl.COMPARATOR;
+    }
+  }
+
   /**
    * HRegion constructor. This constructor should only be used for testing and
    * extensions.  Instances of HRegion should be instantiated with the
@@ -769,9 +780,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
     this.baseConf = confParam;
     this.conf = new CompoundConfiguration().add(confParam).addBytesMap(htd.getValues());
-    this.cellComparator = htd.isMetaTable() ||
-      conf.getBoolean(USE_META_CELL_COMPARATOR, DEFAULT_USE_META_CELL_COMPARATOR) ?
-        MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR;
+    this.cellComparator = getCellComparator(this.conf, htd);
     this.lock = new ReentrantReadWriteLock(conf.getBoolean(FAIR_REENTRANT_CLOSE_LOCK,
         DEFAULT_FAIR_REENTRANT_CLOSE_LOCK));
     this.regionLockHolders = new ConcurrentHashMap<>();

[hbase] 02/09: HBASE-24389 Introduce new master rpc methods to locate meta region through root region (#1774)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 48be0e00f7bc149a9addabd52442309c5853463e
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Sat Jun 27 15:47:51 2020 +0800

    HBASE-24389 Introduce new master rpc methods to locate meta region through root region (#1774)
    
    Signed-off-by: stack <st...@apache.org>
---
 .../apache/hadoop/hbase/CatalogFamilyFormat.java   |  30 +
 .../hadoop/hbase/ClientMetaTableAccessor.java      |  29 +-
 .../client/AbstractAsyncTableRegionLocator.java    | 310 +++++++++
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |   4 +-
 .../hbase/client/AsyncMetaRegionLocator.java       | 146 -----
 .../hbase/client/AsyncMetaTableRegionLocator.java  | 155 +++++
 .../hbase/client/AsyncNonMetaRegionLocator.java    | 725 ---------------------
 .../client/AsyncNonMetaTableRegionLocator.java     | 206 ++++++
 .../hadoop/hbase/client/AsyncRegionLocator.java    | 170 +++--
 .../hbase/client/AsyncRegionLocatorHelper.java     |  42 +-
 .../hbase/client/AsyncTableRegionLocator.java      |   4 +-
 .../hbase/client/AsyncTableRegionLocatorImpl.java  |  11 +-
 .../hadoop/hbase/client/ConnectionRegistry.java    |   6 -
 .../hadoop/hbase/client/ConnectionUtils.java       |  18 -
 .../apache/hadoop/hbase/client/MasterRegistry.java |   2 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  47 +-
 .../hadoop/hbase/client/RegionLocateType.java      |   5 +-
 .../hbase/client/TableRegionLocationCache.java     | 226 +++++++
 .../hadoop/hbase/client/ZKConnectionRegistry.java  |  10 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  27 +
 .../hbase/client/DoNothingConnectionRegistry.java  |   6 -
 .../client/TestAsyncMetaRegionLocatorFailFast.java |  67 --
 .../client/TestAsyncRegionLocatorTracing.java      |  75 ++-
 .../apache/hadoop/hbase/MetaCellComparator.java    |   2 +-
 .../src/main/protobuf/server/master/Master.proto   |  35 +
 .../hadoop/hbase/coprocessor/MasterObserver.java   |  44 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    | 115 +++-
 .../hadoop/hbase/master/MasterCoprocessorHost.java |  40 ++
 .../hadoop/hbase/master/MasterRpcServices.java     | 125 +++-
 .../apache/hadoop/hbase/master/MasterServices.java |   9 +
 .../hbase/master/MetaRegionLocationCache.java      |   9 +-
 .../hbase/master/assignment/RegionStateStore.java  |   6 +-
 .../hbase/master/http/MasterStatusServlet.java     |   5 +-
 .../master/procedure/CreateTableProcedure.java     |   2 -
 .../hbase/master/procedure/ProcedureSyncWait.java  |  14 -
 .../master/snapshot/MasterSnapshotVerifier.java    |   8 +-
 .../hbase/master/snapshot/TakeSnapshotHandler.java |  14 +-
 .../flush/MasterFlushTableProcedureManager.java    |  18 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  18 +-
 .../main/resources/hbase-webapps/master/table.jsp  |  34 +-
 .../apache/hadoop/hbase/TestMetaTableAccessor.java |   8 -
 .../apache/hadoop/hbase/TestMetaTableLocator.java  | 207 ------
 .../hbase/client/AbstractTestRegionLocator.java    |   5 +-
 .../hbase/client/DummyConnectionRegistry.java      |   6 -
 .../hbase/client/MetaWithReplicasTestBase.java     |   9 +-
 .../hbase/client/RegionReplicaTestHelper.java      |  15 +-
 .../client/TestAsyncAdminWithRegionReplicas.java   |   5 +-
 .../hbase/client/TestAsyncMetaRegionLocator.java   |  21 +-
 .../client/TestAsyncNonMetaRegionLocator.java      |   5 +-
 .../hbase/client/TestAsyncRegionAdminApi2.java     |  45 +-
 ... => TestAsyncRegionLocatorConcurrenyLimit.java} |  18 +-
 .../hbase/client/TestAsyncTableAdminApi.java       |  52 +-
 .../hbase/client/TestAsyncTableAdminApi3.java      |  24 +-
 .../hbase/client/TestAsyncTableLocatePrefetch.java |  14 +-
 .../hbase/client/TestAsyncTableRSCrashPublish.java |   3 +-
 .../client/TestAsyncTableUseMetaReplicas.java      |   4 +-
 ...estCatalogReplicaLoadBalanceSimpleSelector.java |  13 +-
 .../hadoop/hbase/client/TestMasterRegistry.java    |   2 +-
 .../hbase/client/TestMetaRegionLocationCache.java  |  39 +-
 .../TestMetaWithReplicasShutdownHandling.java      |  15 +-
 .../hadoop/hbase/client/TestReplicasClient.java    |  14 +-
 .../hbase/client/TestZKConnectionRegistry.java     |   3 +-
 .../hbase/master/MockNoopMasterServices.java       |   6 +
 .../hadoop/hbase/master/TestMasterFailover.java    |  53 +-
 .../master/TestMetaAssignmentWithStopMaster.java   |  16 +-
 .../hbase/master/TestMetaShutdownHandler.java      |  12 +-
 .../master/assignment/TestRegionReplicaSplit.java  |   5 +-
 .../TestCompactionLifeCycleTracker.java            |   4 +-
 .../hbase/regionserver/TestRegionReplicas.java     |   2 +-
 .../regionserver/TestRegionServerNoMaster.java     |  42 +-
 ...stRegionReplicaReplicationEndpointNoMaster.java |  58 +-
 .../hadoop/hbase/util/TestHBaseFsckEncryption.java |   2 +-
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   | 300 +--------
 .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java  |   7 -
 74 files changed, 1853 insertions(+), 2000 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
index 978198b..0178aeb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hbase;
 
+import static org.apache.hadoop.hbase.HConstants.NINES;
+import static org.apache.hadoop.hbase.HConstants.ZEROES;
+import static org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
+
 import edu.umd.cs.findbugs.annotations.Nullable;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -31,8 +35,11 @@ import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocateType;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -418,4 +425,27 @@ public class CatalogFamilyFormat {
     }
     return deleteReplicaLocations;
   }
+
+  private static byte[] buildRegionLocateStartRow(TableName tableName, byte[] row,
+    RegionLocateType locateType) {
+    if (locateType.equals(RegionLocateType.BEFORE)) {
+      if (Bytes.equals(row, HConstants.EMPTY_END_ROW)) {
+        byte[] binaryTableName = tableName.getName();
+        return Arrays.copyOf(binaryTableName, binaryTableName.length + 1);
+      } else {
+        return createRegionName(tableName, row, ZEROES, false);
+      }
+    } else {
+      return createRegionName(tableName, row, NINES, false);
+    }
+  }
+
+  public static Scan createRegionLocateScan(TableName tableName, byte[] row,
+    RegionLocateType locateType, int prefetchLimit) {
+    byte[] startRow = buildRegionLocateStartRow(tableName, row, locateType);
+    byte[] stopRow = RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false);
+    return new Scan().withStartRow(startRow).withStopRow(stopRow, true)
+      .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(prefetchLimit)
+      .setReadType(ReadType.PREAD);
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
index ecc6573..74d2322 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
@@ -164,26 +164,27 @@ public final class ClientMetaTableAccessor {
 
   /**
    * Used to get all region locations for the specific table.
-   * @param metaTable
    * @param tableName table we're looking for, can be null for getting all regions
    * @return the list of region locations. The return value will be wrapped by a
    *         {@link CompletableFuture}.
    */
   public static CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(
-    AsyncTable<AdvancedScanResultConsumer> metaTable, TableName tableName) {
+    AsyncTable<AdvancedScanResultConsumer> metaTable, TableName tableName,
+    boolean excludeOfflinedSplitParents) {
     CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
-    addListener(getTableRegionsAndLocations(metaTable, tableName, true), (locations, err) -> {
-      if (err != null) {
-        future.completeExceptionally(err);
-      } else if (locations == null || locations.isEmpty()) {
-        future.complete(Collections.emptyList());
-      } else {
-        List<HRegionLocation> regionLocations =
-          locations.stream().map(loc -> new HRegionLocation(loc.getFirst(), loc.getSecond()))
-            .collect(Collectors.toList());
-        future.complete(regionLocations);
-      }
-    });
+    addListener(getTableRegionsAndLocations(metaTable, tableName, excludeOfflinedSplitParents),
+      (locations, err) -> {
+        if (err != null) {
+          future.completeExceptionally(err);
+        } else if (locations == null || locations.isEmpty()) {
+          future.complete(Collections.emptyList());
+        } else {
+          List<HRegionLocation> regionLocations =
+            locations.stream().map(loc -> new HRegionLocation(loc.getFirst(), loc.getSecond()))
+              .collect(Collectors.toList());
+          future.complete(regionLocations);
+        }
+      });
     return future;
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractAsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractAsyncTableRegionLocator.java
new file mode 100644
index 0000000..aa7d7e4
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractAsyncTableRegionLocator.java
@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
+import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
+
+import com.google.errorprone.annotations.RestrictedApi;
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import org.apache.commons.lang3.ObjectUtils;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The base class for locating region of a table.
+ */
+@InterfaceAudience.Private
+abstract class AbstractAsyncTableRegionLocator {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractAsyncTableRegionLocator.class);
+
+  protected final AsyncConnectionImpl conn;
+
+  protected final TableName tableName;
+
+  protected final int maxConcurrent;
+
+  protected final TableRegionLocationCache cache;
+
+  protected static final class LocateRequest {
+
+    final byte[] row;
+
+    final RegionLocateType locateType;
+
+    public LocateRequest(byte[] row, RegionLocateType locateType) {
+      this.row = row;
+      this.locateType = locateType;
+    }
+
+    @Override
+    public int hashCode() {
+      return Bytes.hashCode(row) ^ locateType.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (obj == null || obj.getClass() != LocateRequest.class) {
+        return false;
+      }
+      LocateRequest that = (LocateRequest) obj;
+      return locateType.equals(that.locateType) && Bytes.equals(row, that.row);
+    }
+  }
+
+  private final Set<LocateRequest> pendingRequests = new HashSet<>();
+
+  private final Map<LocateRequest, CompletableFuture<RegionLocations>> allRequests =
+    new LinkedHashMap<>();
+
+  AbstractAsyncTableRegionLocator(AsyncConnectionImpl conn, TableName tableName,
+    int maxConcurrent, Comparator<byte[]> comparator) {
+    this.conn = conn;
+    this.tableName = tableName;
+    this.maxConcurrent = maxConcurrent;
+    this.cache = new TableRegionLocationCache(comparator, conn.getConnectionMetrics());
+  }
+
+  private boolean hasQuota() {
+    return pendingRequests.size() < maxConcurrent;
+  }
+
+  protected final Optional<LocateRequest> getCandidate() {
+    return allRequests.keySet().stream().filter(r -> !pendingRequests.contains(r)).findFirst();
+  }
+
+  void clearCompletedRequests(RegionLocations locations) {
+    for (Iterator<Map.Entry<LocateRequest, CompletableFuture<RegionLocations>>> iter =
+      allRequests.entrySet().iterator(); iter.hasNext();) {
+      Map.Entry<LocateRequest, CompletableFuture<RegionLocations>> entry = iter.next();
+      if (tryComplete(entry.getKey(), entry.getValue(), locations)) {
+        iter.remove();
+      }
+    }
+  }
+
+  private boolean tryComplete(LocateRequest req, CompletableFuture<RegionLocations> future,
+    RegionLocations locations) {
+    if (future.isDone()) {
+      return true;
+    }
+    if (locations == null) {
+      return false;
+    }
+    HRegionLocation loc = ObjectUtils.firstNonNull(locations.getRegionLocations());
+    // we should at least have one location available, otherwise the request should fail and
+    // should not arrive here
+    assert loc != null;
+    boolean completed;
+    if (req.locateType.equals(RegionLocateType.BEFORE)) {
+      // for locating the row before current row, the common case is to find the previous region
+      // in reverse scan, so we check the endKey first. In general, the condition should be
+      // startKey < req.row and endKey >= req.row. Here we split it to endKey == req.row ||
+      // (endKey > req.row && startKey < req.row). The two conditions are equal since startKey <
+      // endKey.
+      byte[] endKey = loc.getRegion().getEndKey();
+      int c = Bytes.compareTo(endKey, req.row);
+      completed = c == 0 || ((c > 0 || Bytes.equals(EMPTY_END_ROW, endKey)) &&
+        Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0);
+    } else {
+      completed = loc.getRegion().containsRow(req.row);
+    }
+    if (completed) {
+      future.complete(locations);
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  protected final void onLocateComplete(LocateRequest req, RegionLocations locs, Throwable error) {
+    if (error != null) {
+      LOG.warn("Failed to locate region in '" + tableName + "', row='" +
+        Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType, error);
+    }
+    Optional<LocateRequest> toSend = Optional.empty();
+    if (locs != null) {
+      RegionLocations addedLocs = cache.add(locs);
+      synchronized (this) {
+        pendingRequests.remove(req);
+        clearCompletedRequests(addedLocs);
+        // Remove a complete locate request in a synchronized block, so the table cache must have
+        // quota to send a candidate request.
+        toSend = getCandidate();
+        toSend.ifPresent(pendingRequests::add);
+      }
+      toSend.ifPresent(this::locate);
+    } else {
+      // we meet an error
+      assert error != null;
+      synchronized (this) {
+        pendingRequests.remove(req);
+        // fail the request itself, no matter whether it is a DoNotRetryIOException, as we have
+        // already retried several times
+        CompletableFuture<?> future = allRequests.remove(req);
+        if (future != null) {
+          future.completeExceptionally(error);
+        }
+        clearCompletedRequests(null);
+        // Remove a complete locate request in a synchronized block, so the table cache must have
+        // quota to send a candidate request.
+        toSend = getCandidate();
+        toSend.ifPresent(pendingRequests::add);
+      }
+      toSend.ifPresent(this::locate);
+    }
+  }
+
+  // return false means you do not need to go on, just return. And you do not need to call the above
+  // onLocateComplete either when returning false, as we will call it in this method for you, this
+  // is why we need to pass the LocateRequest as a parameter.
+  protected final boolean validateRegionLocations(RegionLocations locs, LocateRequest req) {
+    // remove HRegionLocation with null location, i.e, getServerName returns null.
+    if (locs != null) {
+      locs = locs.removeElementsWithNullLocation();
+    }
+
+    // the default region location should always be presented when fetching from meta, otherwise
+    // let's fail the request.
+    if (locs == null || locs.getDefaultRegionLocation() == null) {
+      onLocateComplete(req, null,
+        new HBaseIOException(String.format("No location found for '%s', row='%s', locateType=%s",
+          tableName, Bytes.toStringBinary(req.row), req.locateType)));
+      return false;
+    }
+    HRegionLocation loc = locs.getDefaultRegionLocation();
+    RegionInfo info = loc.getRegion();
+    if (info == null) {
+      onLocateComplete(req, null,
+        new HBaseIOException(String.format("HRegionInfo is null for '%s', row='%s', locateType=%s",
+          tableName, Bytes.toStringBinary(req.row), req.locateType)));
+      return false;
+    }
+    return true;
+  }
+
+  protected abstract void locate(LocateRequest req);
+
+  abstract CompletableFuture<List<HRegionLocation>>
+    getAllRegionLocations(boolean excludeOfflinedSplitParents);
+
+  CompletableFuture<RegionLocations> getRegionLocations(byte[] row, int replicaId,
+    RegionLocateType locateType, boolean reload) {
+    if (locateType.equals(RegionLocateType.AFTER)) {
+      row = createClosestRowAfter(row);
+      locateType = RegionLocateType.CURRENT;
+    }
+    if (!reload) {
+      RegionLocations locs = cache.locate(tableName, row, replicaId, locateType);
+      if (isGood(locs, replicaId)) {
+        return CompletableFuture.completedFuture(locs);
+      }
+    }
+    CompletableFuture<RegionLocations> future;
+    LocateRequest req;
+    boolean sendRequest = false;
+    synchronized (this) {
+      // check again
+      if (!reload) {
+        RegionLocations locs = cache.locate(tableName, row, replicaId, locateType);
+        if (isGood(locs, replicaId)) {
+          return CompletableFuture.completedFuture(locs);
+        }
+      }
+      req = new LocateRequest(row, locateType);
+      future = allRequests.get(req);
+      if (future == null) {
+        future = new CompletableFuture<>();
+        allRequests.put(req, future);
+        if (hasQuota() && !pendingRequests.contains(req)) {
+          pendingRequests.add(req);
+          sendRequest = true;
+        }
+      }
+    }
+    if (sendRequest) {
+      locate(req);
+    }
+    return future;
+  }
+
+  void addToCache(RegionLocations locs) {
+    cache.add(locs);
+  }
+
+  // notice that this is not a constant time operation, do not call it on critical path.
+  int getCacheSize() {
+    return cache.size();
+  }
+
+  void clearPendingRequests() {
+    synchronized (this) {
+      if (!allRequests.isEmpty()) {
+        IOException error = new IOException("Cache cleared");
+        for (CompletableFuture<?> future : allRequests.values()) {
+          future.completeExceptionally(error);
+        }
+      }
+    }
+  }
+
+  void clearCache(ServerName serverName) {
+    cache.clearCache(serverName);
+  }
+
+  void removeLocationFromCache(HRegionLocation loc) {
+    cache.removeLocationFromCache(loc);
+  }
+
+  RegionLocations getInCache(byte[] key) {
+    return cache.get(key);
+  }
+
+  // only used for testing whether we have cached the location for a region.
+  @RestrictedApi(explanation = "Should only be called in AsyncRegionLocator",
+    link = "", allowedOnPath = ".*/AsyncRegionLocator.java")
+  RegionLocations locateInCache(byte[] row) {
+    return cache.locate(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID,
+      RegionLocateType.CURRENT);
+  }
+
+  // only used for testing whether we have cached the location for a table.
+  @RestrictedApi(explanation = "Should only be called in AsyncRegionLocator",
+    link = "", allowedOnPath = ".*/AsyncRegionLocator.java")
+  int getNumberOfCachedRegionLocations() {
+    return cache.getNumberOfCachedRegionLocations();
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 25a98ed..5c24d98 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -86,11 +86,11 @@ class AsyncConnectionImpl implements AsyncConnection {
 
   final AsyncConnectionConfiguration connConf;
 
-  private final User user;
+  final User user;
 
   final ConnectionRegistry registry;
 
-  private final int rpcTimeout;
+  final int rpcTimeout;
 
   protected final RpcClient rpcClient;
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
deleted file mode 100644
index 5ae9de6..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.canUpdateOnError;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.removeRegionLocation;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.replaceRegionLocation;
-
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.atomic.AtomicReference;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * The asynchronous locator for meta region.
- */
-@InterfaceAudience.Private
-class AsyncMetaRegionLocator {
-
-  private final ConnectionRegistry registry;
-
-  private final AtomicReference<RegionLocations> metaRegionLocations = new AtomicReference<>();
-
-  private final AtomicReference<CompletableFuture<RegionLocations>> metaRelocateFuture =
-    new AtomicReference<>();
-
-  AsyncMetaRegionLocator(ConnectionRegistry registry) {
-    this.registry = registry;
-  }
-
-  /**
-   * Get the region locations for meta region. If the location for the given replica is not
-   * available in the cached locations, then fetch from the HBase cluster.
-   * <p/>
-   * The <code>replicaId</code> parameter is important. If the region replication config for meta
-   * region is changed, then the cached region locations may not have the locations for new
-   * replicas. If we do not check the location for the given replica, we will always return the
-   * cached region locations and cause an infinite loop.
-   */
-  CompletableFuture<RegionLocations> getRegionLocations(int replicaId, boolean reload) {
-    return ConnectionUtils.getOrFetch(metaRegionLocations, metaRelocateFuture, reload,
-      registry::getMetaRegionLocations, locs -> isGood(locs, replicaId), "meta region location");
-  }
-
-  private HRegionLocation getCacheLocation(HRegionLocation loc) {
-    RegionLocations locs = metaRegionLocations.get();
-    return locs != null ? locs.getRegionLocation(loc.getRegion().getReplicaId()) : null;
-  }
-
-  private void addLocationToCache(HRegionLocation loc) {
-    for (;;) {
-      int replicaId = loc.getRegion().getReplicaId();
-      RegionLocations oldLocs = metaRegionLocations.get();
-      if (oldLocs == null) {
-        RegionLocations newLocs = createRegionLocations(loc);
-        if (metaRegionLocations.compareAndSet(null, newLocs)) {
-          return;
-        }
-      }
-      HRegionLocation oldLoc = oldLocs.getRegionLocation(replicaId);
-      if (oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum() ||
-        oldLoc.getServerName().equals(loc.getServerName()))) {
-        return;
-      }
-      RegionLocations newLocs = replaceRegionLocation(oldLocs, loc);
-      if (metaRegionLocations.compareAndSet(oldLocs, newLocs)) {
-        return;
-      }
-    }
-  }
-
-  private void removeLocationFromCache(HRegionLocation loc) {
-    for (;;) {
-      RegionLocations oldLocs = metaRegionLocations.get();
-      if (oldLocs == null) {
-        return;
-      }
-      HRegionLocation oldLoc = oldLocs.getRegionLocation(loc.getRegion().getReplicaId());
-      if (!canUpdateOnError(loc, oldLoc)) {
-        return;
-      }
-      RegionLocations newLocs = removeRegionLocation(oldLocs, loc.getRegion().getReplicaId());
-      if (metaRegionLocations.compareAndSet(oldLocs, newLocs)) {
-        return;
-      }
-    }
-  }
-
-  void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) {
-    AsyncRegionLocatorHelper.updateCachedLocationOnError(loc, exception, this::getCacheLocation,
-      this::addLocationToCache, this::removeLocationFromCache, null);
-  }
-
-  void clearCache() {
-    metaRegionLocations.set(null);
-  }
-
-  void clearCache(ServerName serverName) {
-    for (;;) {
-      RegionLocations locs = metaRegionLocations.get();
-      if (locs == null) {
-        return;
-      }
-      RegionLocations newLocs = locs.removeByServer(serverName);
-      if (locs == newLocs) {
-        return;
-      }
-      if (newLocs.isEmpty()) {
-        newLocs = null;
-      }
-      if (metaRegionLocations.compareAndSet(locs, newLocs)) {
-        return;
-      }
-    }
-  }
-
-  // only used for testing whether we have cached the location for a region.
-  RegionLocations getRegionLocationInCache() {
-    return metaRegionLocations.get();
-  }
-
-  // only used for testing whether we have cached the location for a table.
-  int getNumberOfCachedRegionLocations() {
-    RegionLocations locs = metaRegionLocations.get();
-    return locs != null ? locs.numNonNullElements() : 0;
-  }
-}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaTableRegionLocator.java
new file mode 100644
index 0000000..c8ffa24
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaTableRegionLocator.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.MetaCellComparator;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService.Interface;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionRequest;
+
+/**
+ * The class for locating regions of the meta table.
+ * <p>
+ * Unlike locators for user tables, meta region locations cannot be looked up by scanning meta
+ * itself, so this locator asks the active master instead: it resolves the master address through
+ * {@code conn.registry.getActiveMaster()}, builds a {@link ClientMetaService} stub against it, and
+ * issues {@code LocateMetaRegion} / {@code GetAllMetaRegionLocations} RPCs. The stub is cached in
+ * an {@link AtomicReference} and dropped again on connection-level failures so that a master
+ * failover is picked up on the next call.
+ */
+@InterfaceAudience.Private
+class AsyncMetaTableRegionLocator extends AbstractAsyncTableRegionLocator {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AsyncMetaTableRegionLocator.class);
+
+  // Cached ClientMetaService stub pointing at the (last known) active master; cleared by
+  // tryClearMasterStubCache on connection errors.
+  private final AtomicReference<Interface> stub = new AtomicReference<>();
+
+  // In-flight stub creation future, used by ConnectionUtils.getOrFetch so concurrent callers
+  // share a single master lookup instead of each resolving the master themselves.
+  private final AtomicReference<CompletableFuture<Interface>> stubMakeFuture =
+    new AtomicReference<>();
+
+  AsyncMetaTableRegionLocator(AsyncConnectionImpl conn, TableName tableName, int maxConcurrent) {
+    // for meta region we should use MetaCellComparator to compare the row keys, since meta row
+    // keys contain embedded commas and a plain byte-wise compare would order them incorrectly
+    super(conn, tableName, maxConcurrent, (r1, r2) -> MetaCellComparator
+      .compareRows(r1, 0, r1.length, r2, 0, r2.length));
+  }
+
+  /**
+   * Creates a {@link ClientMetaService} stub over an RPC channel to the given server (expected to
+   * be the active master), using the connection's read RPC timeout.
+   */
+  private Interface createStub(ServerName serverName) throws IOException {
+    return ClientMetaService.newStub(conn.rpcClient.createRpcChannel(serverName, conn.user,
+      (int) TimeUnit.NANOSECONDS.toMillis(conn.connConf.getReadRpcTimeoutNs())));
+  }
+
+  /**
+   * Returns the cached master stub, or asynchronously resolves the active master through the
+   * connection registry and creates (and caches) a new stub. Fails the returned future with
+   * {@link MasterNotRunningException} when the registry reports no active master.
+   */
+  CompletableFuture<Interface> getStub() {
+    return ConnectionUtils.getOrFetch(stub, stubMakeFuture, false, () -> {
+      CompletableFuture<Interface> future = new CompletableFuture<>();
+      addListener(conn.registry.getActiveMaster(), (addr, error) -> {
+        if (error != null) {
+          future.completeExceptionally(error);
+        } else if (addr == null) {
+          future.completeExceptionally(new MasterNotRunningException(
+            "ZooKeeper available but no active master location found"));
+        } else {
+          LOG.debug("The fetched master address is {}", addr);
+          try {
+            future.complete(createStub(addr));
+          } catch (IOException e) {
+            // createStub failed; surface the error instead of caching a broken stub
+            future.completeExceptionally(e);
+          }
+        }
+
+      });
+      return future;
+    }, stub -> true, "ClientLocateMetaStub");
+  }
+
+  /**
+   * Drops the cached stub when the error indicates we may be talking to a dead or restarting
+   * master, so the next {@link #getStub()} call re-resolves the active master. The CAS ensures we
+   * only clear the stub that actually produced the error, not a newer replacement.
+   */
+  private void tryClearMasterStubCache(IOException error, Interface currentStub) {
+    if (ClientExceptionsUtil.isConnectionException(error) ||
+      error instanceof ServerNotRunningYetException) {
+      stub.compareAndSet(currentStub, null);
+    }
+  }
+
+  /**
+   * Locates the meta region covering {@code req.row} by asking the master via a
+   * {@code LocateMetaRegion} RPC, then reports the result through
+   * {@code onLocateComplete} (inherited from the superclass).
+   */
+  @Override
+  protected void locate(LocateRequest req) {
+    addListener(getStub(), (stub, error) -> {
+      if (error != null) {
+        onLocateComplete(req, null, error);
+        return;
+      }
+      HBaseRpcController controller = conn.rpcControllerFactory.newController();
+      stub.locateMetaRegion(controller,
+        LocateMetaRegionRequest.newBuilder().setRow(ByteString.copyFrom(req.row))
+          .setLocateType(ProtobufUtil.toProtoRegionLocateType(req.locateType)).build(),
+        resp -> {
+          if (controller.failed()) {
+            IOException ex = controller.getFailed();
+            // possibly a stale master; clear the stub so we re-resolve next time
+            tryClearMasterStubCache(ex, stub);
+            onLocateComplete(req, null, ex);
+            return;
+          }
+          RegionLocations locs = new RegionLocations(resp.getMetaLocationsList().stream()
+            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList()));
+          // NOTE(review): when validateRegionLocations returns false, onLocateComplete is not
+          // called here — assumes the superclass fails the request itself in that case; confirm
+          // against AbstractAsyncTableRegionLocator.
+          if (validateRegionLocations(locs, req)) {
+            onLocateComplete(req, locs, null);
+          }
+        });
+    });
+  }
+
+  /**
+   * Fetches the locations of all meta regions from the master via a
+   * {@code GetAllMetaRegionLocations} RPC.
+   * @param excludeOfflinedSplitParents whether the master should omit offlined split parents from
+   *          the returned list
+   * @return future completed with the meta region locations, or completed exceptionally on RPC or
+   *         master-resolution failure
+   */
+  @Override
+  CompletableFuture<List<HRegionLocation>>
+    getAllRegionLocations(boolean excludeOfflinedSplitParents) {
+    CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
+    addListener(getStub(), (stub, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      HBaseRpcController controller = conn.rpcControllerFactory.newController();
+      stub.getAllMetaRegionLocations(controller, GetAllMetaRegionLocationsRequest.newBuilder()
+        .setExcludeOfflinedSplitParents(excludeOfflinedSplitParents).build(), resp -> {
+          if (controller.failed()) {
+            IOException ex = controller.getFailed();
+            // possibly a stale master; clear the stub so we re-resolve next time
+            tryClearMasterStubCache(ex, stub);
+            future.completeExceptionally(ex);
+            return;
+          }
+          List<HRegionLocation> locs = resp.getMetaLocationsList().stream()
+            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList());
+          future.complete(locs);
+        });
+    });
+    return future;
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
deleted file mode 100644
index 1c686ac..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ /dev/null
@@ -1,725 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.apache.hadoop.hbase.HConstants.DEFAULT_USE_META_REPLICAS;
-import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
-import static org.apache.hadoop.hbase.HConstants.NINES;
-import static org.apache.hadoop.hbase.HConstants.USE_META_REPLICAS;
-import static org.apache.hadoop.hbase.HConstants.ZEROES;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.canUpdateOnError;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.removeRegionLocation;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
-import static org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
-import static org.apache.hadoop.hbase.client.RegionLocator.LOCATOR_META_REPLICAS_MODE;
-import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
-import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang3.ObjectUtils;
-import org.apache.hadoop.hbase.CatalogFamilyFormat;
-import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Scan.ReadType;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Objects;
-
-/**
- * The asynchronous locator for regions other than meta.
- */
-@InterfaceAudience.Private
-class AsyncNonMetaRegionLocator {
-
-  private static final Logger LOG = LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class);
-
-  static final String MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE =
-    "hbase.client.meta.max.concurrent.locate.per.table";
-
-  private static final int DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8;
-
-  static String LOCATE_PREFETCH_LIMIT = "hbase.client.locate.prefetch.limit";
-
-  private static final int DEFAULT_LOCATE_PREFETCH_LIMIT = 10;
-
-  private final AsyncConnectionImpl conn;
-
-  private final int maxConcurrentLocateRequestPerTable;
-
-  private final int locatePrefetchLimit;
-
-  // The mode tells if HedgedRead, LoadBalance mode is supported.
-  // The default mode is CatalogReplicaMode.None.
-  private CatalogReplicaMode metaReplicaMode;
-  private CatalogReplicaLoadBalanceSelector metaReplicaSelector;
-
-  private final ConcurrentMap<TableName, TableCache> cache = new ConcurrentHashMap<>();
-
-  private static final class LocateRequest {
-
-    private final byte[] row;
-
-    private final RegionLocateType locateType;
-
-    public LocateRequest(byte[] row, RegionLocateType locateType) {
-      this.row = row;
-      this.locateType = locateType;
-    }
-
-    @Override
-    public int hashCode() {
-      return Bytes.hashCode(row) ^ locateType.hashCode();
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj == null || obj.getClass() != LocateRequest.class) {
-        return false;
-      }
-      LocateRequest that = (LocateRequest) obj;
-      return locateType.equals(that.locateType) && Bytes.equals(row, that.row);
-    }
-  }
-
-  private static final class TableCache {
-
-    private final ConcurrentNavigableMap<byte[], RegionLocations> cache =
-      new ConcurrentSkipListMap<>(BYTES_COMPARATOR);
-
-    private final Set<LocateRequest> pendingRequests = new HashSet<>();
-
-    private final Map<LocateRequest, CompletableFuture<RegionLocations>> allRequests =
-      new LinkedHashMap<>();
-
-    public boolean hasQuota(int max) {
-      return pendingRequests.size() < max;
-    }
-
-    public boolean isPending(LocateRequest req) {
-      return pendingRequests.contains(req);
-    }
-
-    public void send(LocateRequest req) {
-      pendingRequests.add(req);
-    }
-
-    public Optional<LocateRequest> getCandidate() {
-      return allRequests.keySet().stream().filter(r -> !isPending(r)).findFirst();
-    }
-
-    public void clearCompletedRequests(RegionLocations locations) {
-      for (Iterator<Map.Entry<LocateRequest, CompletableFuture<RegionLocations>>> iter =
-        allRequests.entrySet().iterator(); iter.hasNext();) {
-        Map.Entry<LocateRequest, CompletableFuture<RegionLocations>> entry = iter.next();
-        if (tryComplete(entry.getKey(), entry.getValue(), locations)) {
-          iter.remove();
-        }
-      }
-    }
-
-    private boolean tryComplete(LocateRequest req, CompletableFuture<RegionLocations> future,
-        RegionLocations locations) {
-      if (future.isDone()) {
-        return true;
-      }
-      if (locations == null) {
-        return false;
-      }
-      HRegionLocation loc = ObjectUtils.firstNonNull(locations.getRegionLocations());
-      // we should at least have one location available, otherwise the request should fail and
-      // should not arrive here
-      assert loc != null;
-      boolean completed;
-      if (req.locateType.equals(RegionLocateType.BEFORE)) {
-        // for locating the row before current row, the common case is to find the previous region
-        // in reverse scan, so we check the endKey first. In general, the condition should be
-        // startKey < req.row and endKey >= req.row. Here we split it to endKey == req.row ||
-        // (endKey > req.row && startKey < req.row). The two conditions are equal since startKey <
-        // endKey.
-        byte[] endKey = loc.getRegion().getEndKey();
-        int c = Bytes.compareTo(endKey, req.row);
-        completed = c == 0 || ((c > 0 || Bytes.equals(EMPTY_END_ROW, endKey)) &&
-          Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0);
-      } else {
-        completed = loc.getRegion().containsRow(req.row);
-      }
-      if (completed) {
-        future.complete(locations);
-        return true;
-      } else {
-        return false;
-      }
-    }
-  }
-
-  AsyncNonMetaRegionLocator(AsyncConnectionImpl conn) {
-    this.conn = conn;
-    this.maxConcurrentLocateRequestPerTable = conn.getConfiguration().getInt(
-      MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE, DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE);
-    this.locatePrefetchLimit =
-      conn.getConfiguration().getInt(LOCATE_PREFETCH_LIMIT, DEFAULT_LOCATE_PREFETCH_LIMIT);
-
-    // Get the region locator's meta replica mode.
-    this.metaReplicaMode = CatalogReplicaMode.fromString(conn.getConfiguration()
-      .get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString()));
-
-    switch (this.metaReplicaMode) {
-      case LOAD_BALANCE:
-        String replicaSelectorClass = conn.getConfiguration().
-          get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR,
-          CatalogReplicaLoadBalanceSimpleSelector.class.getName());
-
-        this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory.createSelector(
-          replicaSelectorClass, META_TABLE_NAME, conn, () -> {
-            int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
-            try {
-              RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get(
-                conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
-              numOfReplicas = metaLocations.size();
-            } catch (Exception e) {
-              LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
-            }
-            return numOfReplicas;
-          });
-        break;
-      case NONE:
-        // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config.
-        boolean useMetaReplicas = conn.getConfiguration().getBoolean(USE_META_REPLICAS,
-          DEFAULT_USE_META_REPLICAS);
-        if (useMetaReplicas) {
-          this.metaReplicaMode = CatalogReplicaMode.HEDGED_READ;
-        }
-        break;
-      default:
-        // Doing nothing
-    }
-  }
-
-  private TableCache getTableCache(TableName tableName) {
-    return computeIfAbsent(cache, tableName, TableCache::new);
-  }
-
-  private boolean isEqual(RegionLocations locs1, RegionLocations locs2) {
-    HRegionLocation[] locArr1 = locs1.getRegionLocations();
-    HRegionLocation[] locArr2 = locs2.getRegionLocations();
-    if (locArr1.length != locArr2.length) {
-      return false;
-    }
-    for (int i = 0; i < locArr1.length; i++) {
-      // do not need to compare region info
-      HRegionLocation loc1 = locArr1[i];
-      HRegionLocation loc2 = locArr2[i];
-      if (loc1 == null) {
-        if (loc2 != null) {
-          return false;
-        }
-      } else {
-        if (loc2 == null) {
-          return false;
-        }
-        if (loc1.getSeqNum() != loc2.getSeqNum()) {
-          return false;
-        }
-        if (!Objects.equal(loc1.getServerName(), loc2.getServerName())) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  // if we successfully add the locations to cache, return the locations, otherwise return the one
-  // which prevents us being added. The upper layer can use this value to complete pending requests.
-  private RegionLocations addToCache(TableCache tableCache, RegionLocations locs) {
-    LOG.trace("Try adding {} to cache", locs);
-    byte[] startKey = locs.getRegionLocation().getRegion().getStartKey();
-    for (;;) {
-      RegionLocations oldLocs = tableCache.cache.putIfAbsent(startKey, locs);
-      if (oldLocs == null) {
-        return locs;
-      }
-      // check whether the regions are the same, this usually happens when table is split/merged, or
-      // deleted and recreated again.
-      RegionInfo region = locs.getRegionLocation().getRegion();
-      RegionInfo oldRegion = oldLocs.getRegionLocation().getRegion();
-      if (region.getEncodedName().equals(oldRegion.getEncodedName())) {
-        RegionLocations mergedLocs = oldLocs.mergeLocations(locs);
-        if (isEqual(mergedLocs, oldLocs)) {
-          // the merged one is the same with the old one, give up
-          LOG.trace("Will not add {} to cache because the old value {} " +
-            " is newer than us or has the same server name." +
-            " Maybe it is updated before we replace it", locs, oldLocs);
-          return oldLocs;
-        }
-        if (tableCache.cache.replace(startKey, oldLocs, mergedLocs)) {
-          return mergedLocs;
-        }
-      } else {
-        // the region is different, here we trust the one we fetched. This maybe wrong but finally
-        // the upper layer can detect this and trigger removal of the wrong locations
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("The newnly fetch region {} is different from the old one {} for row '{}'," +
-            " try replaing the old one...", region, oldRegion, Bytes.toStringBinary(startKey));
-        }
-        if (tableCache.cache.replace(startKey, oldLocs, locs)) {
-          return locs;
-        }
-      }
-    }
-  }
-
-  private void complete(TableName tableName, LocateRequest req, RegionLocations locs,
-      Throwable error) {
-    if (error != null) {
-      LOG.warn("Failed to locate region in '" + tableName + "', row='" +
-        Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType, error);
-    }
-    Optional<LocateRequest> toSend = Optional.empty();
-    TableCache tableCache = getTableCache(tableName);
-    if (locs != null) {
-      RegionLocations addedLocs = addToCache(tableCache, locs);
-      synchronized (tableCache) {
-        tableCache.pendingRequests.remove(req);
-        tableCache.clearCompletedRequests(addedLocs);
-        // Remove a complete locate request in a synchronized block, so the table cache must have
-        // quota to send a candidate request.
-        toSend = tableCache.getCandidate();
-        toSend.ifPresent(r -> tableCache.send(r));
-      }
-      toSend.ifPresent(r -> locateInMeta(tableName, r));
-    } else {
-      // we meet an error
-      assert error != null;
-      synchronized (tableCache) {
-        tableCache.pendingRequests.remove(req);
-        // fail the request itself, no matter whether it is a DoNotRetryIOException, as we have
-        // already retried several times
-        CompletableFuture<?> future = tableCache.allRequests.remove(req);
-        if (future != null) {
-          future.completeExceptionally(error);
-        }
-        tableCache.clearCompletedRequests(null);
-        // Remove a complete locate request in a synchronized block, so the table cache must have
-        // quota to send a candidate request.
-        toSend = tableCache.getCandidate();
-        toSend.ifPresent(r -> tableCache.send(r));
-      }
-      toSend.ifPresent(r -> locateInMeta(tableName, r));
-    }
-  }
-
-  // return whether we should stop the scan
-  private boolean onScanNext(TableName tableName, LocateRequest req, Result result) {
-    RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("The fetched location of '{}', row='{}', locateType={} is {}", tableName,
-        Bytes.toStringBinary(req.row), req.locateType, locs);
-    }
-    // remove HRegionLocation with null location, i.e, getServerName returns null.
-    if (locs != null) {
-      locs = locs.removeElementsWithNullLocation();
-    }
-
-    // the default region location should always be presented when fetching from meta, otherwise
-    // let's fail the request.
-    if (locs == null || locs.getDefaultRegionLocation() == null) {
-      complete(tableName, req, null,
-        new HBaseIOException(String.format("No location found for '%s', row='%s', locateType=%s",
-          tableName, Bytes.toStringBinary(req.row), req.locateType)));
-      return true;
-    }
-    HRegionLocation loc = locs.getDefaultRegionLocation();
-    RegionInfo info = loc.getRegion();
-    if (info == null) {
-      complete(tableName, req, null,
-        new HBaseIOException(String.format("HRegionInfo is null for '%s', row='%s', locateType=%s",
-          tableName, Bytes.toStringBinary(req.row), req.locateType)));
-      return true;
-    }
-    if (info.isSplitParent()) {
-      return false;
-    }
-    complete(tableName, req, locs, null);
-    return true;
-  }
-
-  private void recordCacheHit() {
-    conn.getConnectionMetrics().ifPresent(MetricsConnection::incrMetaCacheHit);
-  }
-
-  private void recordCacheMiss() {
-    conn.getConnectionMetrics().ifPresent(MetricsConnection::incrMetaCacheMiss);
-  }
-
-  private RegionLocations locateRowInCache(TableCache tableCache, TableName tableName, byte[] row,
-      int replicaId) {
-    Map.Entry<byte[], RegionLocations> entry = tableCache.cache.floorEntry(row);
-    if (entry == null) {
-      recordCacheMiss();
-      return null;
-    }
-    RegionLocations locs = entry.getValue();
-    HRegionLocation loc = locs.getRegionLocation(replicaId);
-    if (loc == null) {
-      recordCacheMiss();
-      return null;
-    }
-    byte[] endKey = loc.getRegion().getEndKey();
-    if (isEmptyStopRow(endKey) || Bytes.compareTo(row, endKey) < 0) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
-          Bytes.toStringBinary(row), RegionLocateType.CURRENT, replicaId);
-      }
-      recordCacheHit();
-      return locs;
-    } else {
-      recordCacheMiss();
-      return null;
-    }
-  }
-
-  private RegionLocations locateRowBeforeInCache(TableCache tableCache, TableName tableName,
-      byte[] row, int replicaId) {
-    boolean isEmptyStopRow = isEmptyStopRow(row);
-    Map.Entry<byte[], RegionLocations> entry =
-      isEmptyStopRow ? tableCache.cache.lastEntry() : tableCache.cache.lowerEntry(row);
-    if (entry == null) {
-      recordCacheMiss();
-      return null;
-    }
-    RegionLocations locs = entry.getValue();
-    HRegionLocation loc = locs.getRegionLocation(replicaId);
-    if (loc == null) {
-      recordCacheMiss();
-      return null;
-    }
-    if (isEmptyStopRow(loc.getRegion().getEndKey()) ||
-      (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
-          Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId);
-      }
-      recordCacheHit();
-      return locs;
-    } else {
-      recordCacheMiss();
-      return null;
-    }
-  }
-
-  private void locateInMeta(TableName tableName, LocateRequest req) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row) +
-        "', locateType=" + req.locateType + " in meta");
-    }
-    byte[] metaStartKey;
-    if (req.locateType.equals(RegionLocateType.BEFORE)) {
-      if (isEmptyStopRow(req.row)) {
-        byte[] binaryTableName = tableName.getName();
-        metaStartKey = Arrays.copyOf(binaryTableName, binaryTableName.length + 1);
-      } else {
-        metaStartKey = createRegionName(tableName, req.row, ZEROES, false);
-      }
-    } else {
-      metaStartKey = createRegionName(tableName, req.row, NINES, false);
-    }
-    byte[] metaStopKey =
-      RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false);
-    Scan scan = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey, true)
-      .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(locatePrefetchLimit)
-      .setReadType(ReadType.PREAD);
-
-    switch (this.metaReplicaMode) {
-      case LOAD_BALANCE:
-        int metaReplicaId = this.metaReplicaSelector.select(tableName, req.row, req.locateType);
-        if (metaReplicaId != RegionInfo.DEFAULT_REPLICA_ID) {
-          // If the selector gives a non-primary meta replica region, then go with it.
-          // Otherwise, just go to primary in non-hedgedRead mode.
-          scan.setConsistency(Consistency.TIMELINE);
-          scan.setReplicaId(metaReplicaId);
-        }
-        break;
-      case HEDGED_READ:
-        scan.setConsistency(Consistency.TIMELINE);
-        break;
-      default:
-        // do nothing
-    }
-
-    conn.getTable(META_TABLE_NAME).scan(scan, new AdvancedScanResultConsumer() {
-
-      private boolean completeNormally = false;
-
-      private boolean tableNotFound = true;
-
-      @Override
-      public void onError(Throwable error) {
-        complete(tableName, req, null, error);
-      }
-
-      @Override
-      public void onComplete() {
-        if (tableNotFound) {
-          complete(tableName, req, null, new TableNotFoundException(tableName));
-        } else if (!completeNormally) {
-          complete(tableName, req, null, new IOException(
-            "Unable to find region for '" + Bytes.toStringBinary(req.row) + "' in " + tableName));
-        }
-      }
-
-      @Override
-      public void onNext(Result[] results, ScanController controller) {
-        if (results.length == 0) {
-          return;
-        }
-        tableNotFound = false;
-        int i = 0;
-        for (; i < results.length; i++) {
-          if (onScanNext(tableName, req, results[i])) {
-            completeNormally = true;
-            controller.terminate();
-            i++;
-            break;
-          }
-        }
-        // Add the remaining results into cache
-        if (i < results.length) {
-          TableCache tableCache = getTableCache(tableName);
-          for (; i < results.length; i++) {
-            RegionLocations locs = CatalogFamilyFormat.getRegionLocations(results[i]);
-            if (locs == null) {
-              continue;
-            }
-            HRegionLocation loc = locs.getDefaultRegionLocation();
-            if (loc == null) {
-              continue;
-            }
-            RegionInfo info = loc.getRegion();
-            if (info == null || info.isOffline() || info.isSplitParent()) {
-              continue;
-            }
-            RegionLocations addedLocs = addToCache(tableCache, locs);
-            synchronized (tableCache) {
-              tableCache.clearCompletedRequests(addedLocs);
-            }
-          }
-        }
-      }
-    });
-  }
-
-  private RegionLocations locateInCache(TableCache tableCache, TableName tableName, byte[] row,
-      int replicaId, RegionLocateType locateType) {
-    return locateType.equals(RegionLocateType.BEFORE)
-      ? locateRowBeforeInCache(tableCache, tableName, row, replicaId)
-      : locateRowInCache(tableCache, tableName, row, replicaId);
-  }
-
-  // locateToPrevious is true means we will use the start key of a region to locate the region
-  // placed before it. Used for reverse scan. See the comment of
-  // AsyncRegionLocator.getPreviousRegionLocation.
-  private CompletableFuture<RegionLocations> getRegionLocationsInternal(TableName tableName,
-      byte[] row, int replicaId, RegionLocateType locateType, boolean reload) {
-    // AFTER should be convert to CURRENT before calling this method
-    assert !locateType.equals(RegionLocateType.AFTER);
-    TableCache tableCache = getTableCache(tableName);
-    if (!reload) {
-      RegionLocations locs = locateInCache(tableCache, tableName, row, replicaId, locateType);
-      if (isGood(locs, replicaId)) {
-        return CompletableFuture.completedFuture(locs);
-      }
-    }
-    CompletableFuture<RegionLocations> future;
-    LocateRequest req;
-    boolean sendRequest = false;
-    synchronized (tableCache) {
-      // check again
-      if (!reload) {
-        RegionLocations locs = locateInCache(tableCache, tableName, row, replicaId, locateType);
-        if (isGood(locs, replicaId)) {
-          return CompletableFuture.completedFuture(locs);
-        }
-      }
-      req = new LocateRequest(row, locateType);
-      future = tableCache.allRequests.get(req);
-      if (future == null) {
-        future = new CompletableFuture<>();
-        tableCache.allRequests.put(req, future);
-        if (tableCache.hasQuota(maxConcurrentLocateRequestPerTable) && !tableCache.isPending(req)) {
-          tableCache.send(req);
-          sendRequest = true;
-        }
-      }
-    }
-    if (sendRequest) {
-      locateInMeta(tableName, req);
-    }
-    return future;
-  }
-
-  CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
-      int replicaId, RegionLocateType locateType, boolean reload) {
-    // as we know the exact row after us, so we can just create the new row, and use the same
-    // algorithm to locate it.
-    if (locateType.equals(RegionLocateType.AFTER)) {
-      row = createClosestRowAfter(row);
-      locateType = RegionLocateType.CURRENT;
-    }
-    return getRegionLocationsInternal(tableName, row, replicaId, locateType, reload);
-  }
-
-  private void recordClearRegionCache() {
-    conn.getConnectionMetrics().ifPresent(MetricsConnection::incrMetaCacheNumClearRegion);
-  }
-
-  private void removeLocationFromCache(HRegionLocation loc) {
-    TableCache tableCache = cache.get(loc.getRegion().getTable());
-    if (tableCache == null) {
-      return;
-    }
-    byte[] startKey = loc.getRegion().getStartKey();
-    for (;;) {
-      RegionLocations oldLocs = tableCache.cache.get(startKey);
-      if (oldLocs == null) {
-        return;
-      }
-      HRegionLocation oldLoc = oldLocs.getRegionLocation(loc.getRegion().getReplicaId());
-      if (!canUpdateOnError(loc, oldLoc)) {
-        return;
-      }
-      // Tell metaReplicaSelector that the location is stale. It will create a stale entry
-      // with timestamp internally. Next time the client looks up the same location,
-      // it will pick a different meta replica region.
-      if (this.metaReplicaMode == CatalogReplicaMode.LOAD_BALANCE) {
-        metaReplicaSelector.onError(loc);
-      }
-
-      RegionLocations newLocs = removeRegionLocation(oldLocs, loc.getRegion().getReplicaId());
-      if (newLocs == null) {
-        if (tableCache.cache.remove(startKey, oldLocs)) {
-          recordClearRegionCache();
-          return;
-        }
-      } else {
-        if (tableCache.cache.replace(startKey, oldLocs, newLocs)) {
-          recordClearRegionCache();
-          return;
-        }
-      }
-    }
-  }
-
-  private void addLocationToCache(HRegionLocation loc) {
-    addToCache(getTableCache(loc.getRegion().getTable()), createRegionLocations(loc));
-  }
-
-  private HRegionLocation getCachedLocation(HRegionLocation loc) {
-    TableCache tableCache = cache.get(loc.getRegion().getTable());
-    if (tableCache == null) {
-      return null;
-    }
-    RegionLocations locs = tableCache.cache.get(loc.getRegion().getStartKey());
-    return locs != null ? locs.getRegionLocation(loc.getRegion().getReplicaId()) : null;
-  }
-
-  void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) {
-    Optional<MetricsConnection> connectionMetrics = conn.getConnectionMetrics();
-    AsyncRegionLocatorHelper.updateCachedLocationOnError(loc, exception, this::getCachedLocation,
-      this::addLocationToCache, this::removeLocationFromCache, connectionMetrics.orElse(null));
-  }
-
-  void clearCache(TableName tableName) {
-    TableCache tableCache = cache.remove(tableName);
-    if (tableCache == null) {
-      return;
-    }
-    synchronized (tableCache) {
-      if (!tableCache.allRequests.isEmpty()) {
-        IOException error = new IOException("Cache cleared");
-        tableCache.allRequests.values().forEach(f -> f.completeExceptionally(error));
-      }
-    }
-    conn.getConnectionMetrics()
-      .ifPresent(metrics -> metrics.incrMetaCacheNumClearRegion(tableCache.cache.size()));
-  }
-
-  void clearCache() {
-    cache.clear();
-  }
-
-  void clearCache(ServerName serverName) {
-    for (TableCache tableCache : cache.values()) {
-      for (Map.Entry<byte[], RegionLocations> entry : tableCache.cache.entrySet()) {
-        byte[] regionName = entry.getKey();
-        RegionLocations locs = entry.getValue();
-        RegionLocations newLocs = locs.removeByServer(serverName);
-        if (locs == newLocs) {
-          continue;
-        }
-        if (newLocs.isEmpty()) {
-          tableCache.cache.remove(regionName, locs);
-        } else {
-          tableCache.cache.replace(regionName, locs, newLocs);
-        }
-      }
-    }
-  }
-
-  // only used for testing whether we have cached the location for a region.
-  RegionLocations getRegionLocationInCache(TableName tableName, byte[] row) {
-    TableCache tableCache = cache.get(tableName);
-    if (tableCache == null) {
-      return null;
-    }
-    return locateRowInCache(tableCache, tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-  }
-
-  // only used for testing whether we have cached the location for a table.
-  int getNumberOfCachedRegionLocations(TableName tableName) {
-    TableCache tableCache = cache.get(tableName);
-    if (tableCache == null) {
-      return 0;
-    }
-    return tableCache.cache.values().stream().mapToInt(RegionLocations::numNonNullElements).sum();
-  }
-}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaTableRegionLocator.java
new file mode 100644
index 0000000..c03e37c
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaTableRegionLocator.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_USE_META_REPLICAS;
+import static org.apache.hadoop.hbase.HConstants.USE_META_REPLICAS;
+import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
+import static org.apache.hadoop.hbase.client.RegionLocator.LOCATOR_META_REPLICAS_MODE;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The class for locating regions of tables other than meta.
+ */
+@InterfaceAudience.Private
+class AsyncNonMetaTableRegionLocator extends AbstractAsyncTableRegionLocator {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AsyncNonMetaTableRegionLocator.class);
+
+  private final int prefetchLimit;
+
+  // The catalog replica read mode in use: HEDGED_READ, LOAD_BALANCE, or the
+  // default, CatalogReplicaMode.NONE.
+  private CatalogReplicaMode metaReplicaMode;
+
+  private CatalogReplicaLoadBalanceSelector metaReplicaSelector;
+
+  AsyncNonMetaTableRegionLocator(AsyncConnectionImpl conn, TableName tableName, int maxConcurrent,
+    int prefetchLimit) {
+    super(conn, tableName, maxConcurrent, Bytes.BYTES_COMPARATOR);
+    this.prefetchLimit = prefetchLimit;
+    // Get the region locator's meta replica mode.
+    this.metaReplicaMode = CatalogReplicaMode.fromString(conn.getConfiguration()
+      .get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString()));
+
+    switch (this.metaReplicaMode) {
+      case LOAD_BALANCE:
+        String replicaSelectorClass = conn.getConfiguration().
+          get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR,
+          CatalogReplicaLoadBalanceSimpleSelector.class.getName());
+
+        this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory.createSelector(
+          replicaSelectorClass, META_TABLE_NAME, conn, () -> {
+            int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
+            try {
+              RegionLocations metaLocations = conn.getLocator()
+                .getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
+                  RegionLocateType.CURRENT, true, conn.connConf.getReadRpcTimeoutNs())
+                .get();
+              numOfReplicas = metaLocations.size();
+            } catch (Exception e) {
+              LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
+            }
+            return numOfReplicas;
+          });
+        break;
+      case NONE:
+        // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config.
+        boolean useMetaReplicas = conn.getConfiguration().getBoolean(USE_META_REPLICAS,
+          DEFAULT_USE_META_REPLICAS);
+        if (useMetaReplicas) {
+          this.metaReplicaMode = CatalogReplicaMode.HEDGED_READ;
+        }
+        break;
+      default:
+        // Doing nothing
+    }
+  }
+
+  // return whether we should stop the scan
+  private boolean onScanNext(TableName tableName, LocateRequest req, Result result) {
+    RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("The fetched location of '{}', row='{}', locateType={} is {}", tableName,
+        Bytes.toStringBinary(req.row), req.locateType, locs);
+    }
+    if (!validateRegionLocations(locs, req)) {
+      return true;
+    }
+    if (locs.getDefaultRegionLocation().getRegion().isSplitParent()) {
+      return false;
+    }
+    onLocateComplete(req, locs, null);
+    return true;
+  }
+
+  @Override
+  protected void locate(LocateRequest req) {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Try locate '{}', row='{}', locateType={} in meta", tableName,
+        Bytes.toStringBinary(req.row), req.locateType);
+    }
+    Scan scan =
+      CatalogFamilyFormat.createRegionLocateScan(tableName, req.row, req.locateType, prefetchLimit);
+    switch (this.metaReplicaMode) {
+      case LOAD_BALANCE:
+        int metaReplicaId = this.metaReplicaSelector.select(tableName, req.row, req.locateType);
+        if (metaReplicaId != RegionInfo.DEFAULT_REPLICA_ID) {
+          // If the selector gives a non-primary meta replica region, then go with it.
+          // Otherwise, just go to primary in non-hedgedRead mode.
+          scan.setConsistency(Consistency.TIMELINE);
+          scan.setReplicaId(metaReplicaId);
+        }
+        break;
+      case HEDGED_READ:
+        scan.setConsistency(Consistency.TIMELINE);
+        break;
+      default:
+        // do nothing
+    }
+    conn.getTable(TableName.META_TABLE_NAME).scan(scan, new AdvancedScanResultConsumer() {
+
+      private boolean completeNormally = false;
+
+      private boolean tableNotFound = true;
+
+      @Override
+      public void onError(Throwable error) {
+        onLocateComplete(req, null, error);
+      }
+
+      @Override
+      public void onComplete() {
+        if (tableNotFound) {
+          onLocateComplete(req, null, new TableNotFoundException(tableName));
+        } else if (!completeNormally) {
+          onLocateComplete(req, null, new IOException(
+            "Unable to find region for '" + Bytes.toStringBinary(req.row) + "' in " + tableName));
+        }
+      }
+
+      @Override
+      public void onNext(Result[] results, ScanController controller) {
+        if (results.length == 0) {
+          return;
+        }
+        tableNotFound = false;
+        int i = 0;
+        for (; i < results.length; i++) {
+          if (onScanNext(tableName, req, results[i])) {
+            completeNormally = true;
+            controller.terminate();
+            i++;
+            break;
+          }
+        }
+        // Add the remaining results into cache
+        if (i < results.length) {
+          for (; i < results.length; i++) {
+            RegionLocations locs = CatalogFamilyFormat.getRegionLocations(results[i]);
+            if (locs == null) {
+              continue;
+            }
+            HRegionLocation loc = locs.getDefaultRegionLocation();
+            if (loc == null) {
+              continue;
+            }
+            RegionInfo info = loc.getRegion();
+            if (info == null || info.isOffline() || info.isSplitParent()) {
+              continue;
+            }
+            RegionLocations addedLocs = cache.add(locs);
+            synchronized (this) {
+              clearCompletedRequests(addedLocs);
+            }
+          }
+        }
+      }
+    });
+  }
+
+  @Override
+  CompletableFuture<List<HRegionLocation>>
+    getAllRegionLocations(boolean excludeOfflinedSplitParents) {
+    return ClientMetaTableAccessor.getTableHRegionLocations(
+      conn.getTable(TableName.META_TABLE_NAME), tableName, excludeOfflinedSplitParents);
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
index 716598a..7bfb7f3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
@@ -17,20 +17,26 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
+import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations;
 import static org.apache.hadoop.hbase.trace.TraceUtil.REGION_NAMES_KEY;
 import static org.apache.hadoop.hbase.trace.TraceUtil.SERVER_NAME_KEY;
 import static org.apache.hadoop.hbase.trace.TraceUtil.createSpan;
 import static org.apache.hadoop.hbase.trace.TraceUtil.createTableSpan;
+import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import io.opentelemetry.api.trace.Span;
 import io.opentelemetry.api.trace.StatusCode;
 import io.opentelemetry.context.Scope;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.function.Supplier;
@@ -43,8 +49,6 @@ import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 import org.apache.hbase.thirdparty.io.netty.util.Timeout;
@@ -55,21 +59,40 @@ import org.apache.hbase.thirdparty.io.netty.util.Timeout;
 @InterfaceAudience.Private
 class AsyncRegionLocator {
 
-  private static final Logger LOG = LoggerFactory.getLogger(AsyncRegionLocator.class);
+  static final String MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE =
+    "hbase.client.meta.max.concurrent.locate.per.table";
+
+  private static final int DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8;
+
+  static final String MAX_CONCURRENT_LOCATE_META_REQUEST =
+    "hbase.client.meta.max.concurrent.locate";
+
+  static String LOCATE_PREFETCH_LIMIT = "hbase.client.locate.prefetch.limit";
+
+  private static final int DEFAULT_LOCATE_PREFETCH_LIMIT = 10;
 
   private final HashedWheelTimer retryTimer;
 
   private final AsyncConnectionImpl conn;
 
-  private final AsyncMetaRegionLocator metaRegionLocator;
+  private final int maxConcurrentLocateRequestPerTable;
+
+  private final int maxConcurrentLocateMetaRequest;
 
-  private final AsyncNonMetaRegionLocator nonMetaRegionLocator;
+  private final int locatePrefetchLimit;
 
-  AsyncRegionLocator(AsyncConnectionImpl conn, HashedWheelTimer retryTimer) {
+  private final ConcurrentMap<TableName, AbstractAsyncTableRegionLocator> table2Locator =
+    new ConcurrentHashMap<>();
+
+  public AsyncRegionLocator(AsyncConnectionImpl conn, HashedWheelTimer retryTimer) {
     this.conn = conn;
-    this.metaRegionLocator = new AsyncMetaRegionLocator(conn.registry);
-    this.nonMetaRegionLocator = new AsyncNonMetaRegionLocator(conn);
     this.retryTimer = retryTimer;
+    this.maxConcurrentLocateRequestPerTable = conn.getConfiguration().getInt(
+      MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE, DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE);
+    this.maxConcurrentLocateMetaRequest = conn.getConfiguration()
+      .getInt(MAX_CONCURRENT_LOCATE_META_REQUEST, maxConcurrentLocateRequestPerTable);
+    this.locatePrefetchLimit =
+      conn.getConfiguration().getInt(LOCATE_PREFETCH_LIMIT, DEFAULT_LOCATE_PREFETCH_LIMIT);
   }
 
   private <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future, long timeoutNs,
@@ -127,13 +150,30 @@ class AsyncRegionLocator {
     return names;
   }
 
+  private AbstractAsyncTableRegionLocator getOrCreateTableRegionLocator(TableName tableName) {
+    return computeIfAbsent(table2Locator, tableName, () -> {
+      if (isMeta(tableName)) {
+        return new AsyncMetaTableRegionLocator(conn, tableName, maxConcurrentLocateMetaRequest);
+      } else {
+        return new AsyncNonMetaTableRegionLocator(conn, tableName,
+          maxConcurrentLocateRequestPerTable, locatePrefetchLimit);
+      }
+    });
+  }
+
+  CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
+    int replicaId, RegionLocateType locateType, boolean reload) {
+    return tracedLocationFuture(() -> {
+      return getOrCreateTableRegionLocator(tableName).getRegionLocations(row, replicaId, locateType,
+        reload);
+    }, this::getRegionName, tableName, "getRegionLocations");
+  }
+
   CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
     RegionLocateType type, boolean reload, long timeoutNs) {
     return tracedLocationFuture(() -> {
-      CompletableFuture<RegionLocations> future = isMeta(tableName) ?
-        metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload) :
-        nonMetaRegionLocator.getRegionLocations(tableName, row,
-          RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload);
+      CompletableFuture<RegionLocations> future =
+        getRegionLocations(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload);
       return withTimeout(future, timeoutNs,
         () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) +
           "ms) waiting for region locations for " + tableName + ", row='" +
@@ -148,8 +188,7 @@ class AsyncRegionLocator {
       // Change it later if the meta table can have more than one regions.
       CompletableFuture<HRegionLocation> future = new CompletableFuture<>();
       CompletableFuture<RegionLocations> locsFuture =
-        isMeta(tableName) ? metaRegionLocator.getRegionLocations(replicaId, reload) :
-          nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload);
+        getRegionLocations(tableName, row, replicaId, type, reload);
       addListener(locsFuture, (locs, error) -> {
         if (error != null) {
           future.completeExceptionally(error);
@@ -193,30 +232,61 @@ class AsyncRegionLocator {
     return getRegionLocation(tableName, row, type, false, timeoutNs);
   }
 
-  void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) {
-    if (loc.getRegion().isMetaRegion()) {
-      metaRegionLocator.updateCachedLocationOnError(loc, exception);
-    } else {
-      nonMetaRegionLocator.updateCachedLocationOnError(loc, exception);
+  /**
+   * Get all region locations for a table.
+   * <p/>
+   * Notice that this method will not read from cache.
+   */
+  CompletableFuture<List<HRegionLocation>> getAllRegionLocations(TableName tableName,
+    boolean excludeOfflinedSplitParents) {
+    CompletableFuture<List<HRegionLocation>> future =
+      getOrCreateTableRegionLocator(tableName).getAllRegionLocations(excludeOfflinedSplitParents);
+    addListener(future, (locs, error) -> {
+      if (error != null) {
+        return;
+      }
+      // add locations to cache
+      AbstractAsyncTableRegionLocator locator = getOrCreateTableRegionLocator(tableName);
+      Map<RegionInfo, List<HRegionLocation>> map = new HashMap<>();
+      for (HRegionLocation loc : locs) {
+        // do not cache split parent
+        if (loc.getRegion() != null && !loc.getRegion().isSplitParent()) {
+          map.computeIfAbsent(RegionReplicaUtil.getRegionInfoForDefaultReplica(loc.getRegion()),
+            k -> new ArrayList<>()).add(loc);
+        }
+      }
+      for (List<HRegionLocation> l : map.values()) {
+        locator.addToCache(new RegionLocations(l));
+      }
+    });
+    return future;
+  }
+
+  private void removeLocationFromCache(HRegionLocation loc) {
+    AbstractAsyncTableRegionLocator locator = table2Locator.get(loc.getRegion().getTable());
+    if (locator == null) {
+      return;
     }
+    locator.removeLocationFromCache(loc);
   }
 
   void clearCache(TableName tableName) {
     TraceUtil.trace(() -> {
-      LOG.debug("Clear meta cache for {}", tableName);
-      if (tableName.equals(META_TABLE_NAME)) {
-        metaRegionLocator.clearCache();
-      } else {
-        nonMetaRegionLocator.clearCache(tableName);
-      }
+    AbstractAsyncTableRegionLocator locator = table2Locator.remove(tableName);
+    if (locator == null) {
+      return;
+    }
+    locator.clearPendingRequests();
+    conn.getConnectionMetrics()
+      .ifPresent(metrics -> metrics.incrMetaCacheNumClearRegion(locator.getCacheSize()));
     }, () -> createTableSpan("AsyncRegionLocator.clearCache", tableName));
   }
 
   void clearCache(ServerName serverName) {
     TraceUtil.trace(() -> {
-      LOG.debug("Clear meta cache for {}", serverName);
-      metaRegionLocator.clearCache(serverName);
-      nonMetaRegionLocator.clearCache(serverName);
+      for (AbstractAsyncTableRegionLocator locator : table2Locator.values()) {
+        locator.clearCache(serverName);
+      }
       conn.getConnectionMetrics().ifPresent(MetricsConnection::incrMetaCacheNumClearServer);
     }, () -> createSpan("AsyncRegionLocator.clearCache").setAttribute(SERVER_NAME_KEY,
       serverName.getServerName()));
@@ -224,30 +294,48 @@ class AsyncRegionLocator {
 
   void clearCache() {
     TraceUtil.trace(() -> {
-      metaRegionLocator.clearCache();
-      nonMetaRegionLocator.clearCache();
+      table2Locator.clear();
     }, "AsyncRegionLocator.clearCache");
   }
 
-  AsyncNonMetaRegionLocator getNonMetaRegionLocator() {
-    return nonMetaRegionLocator;
+  private void addLocationToCache(HRegionLocation loc) {
+    getOrCreateTableRegionLocator(loc.getRegion().getTable())
+      .addToCache(createRegionLocations(loc));
+  }
+
+  private HRegionLocation getCachedLocation(HRegionLocation loc) {
+    AbstractAsyncTableRegionLocator locator = table2Locator.get(loc.getRegion().getTable());
+    if (locator == null) {
+      return null;
+    }
+    RegionLocations locs = locator.getInCache(loc.getRegion().getStartKey());
+    return locs != null ? locs.getRegionLocation(loc.getRegion().getReplicaId()) : null;
+  }
+
+  void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) {
+    AsyncRegionLocatorHelper.updateCachedLocationOnError(loc, exception, this::getCachedLocation,
+      this::addLocationToCache, this::removeLocationFromCache, conn.getConnectionMetrics());
   }
 
   // only used for testing whether we have cached the location for a region.
+  @RestrictedApi(explanation = "Should only be called in tests", link = "",
+    allowedOnPath = ".*/src/test/.*")
   RegionLocations getRegionLocationInCache(TableName tableName, byte[] row) {
-    if (TableName.isMetaTableName(tableName)) {
-      return metaRegionLocator.getRegionLocationInCache();
-    } else {
-      return nonMetaRegionLocator.getRegionLocationInCache(tableName, row);
+    AbstractAsyncTableRegionLocator locator = table2Locator.get(tableName);
+    if (locator == null) {
+      return null;
     }
+    return locator.locateInCache(row);
   }
 
   // only used for testing whether we have cached the location for a table.
+  @RestrictedApi(explanation = "Should only be called in tests", link = "",
+    allowedOnPath = ".*/src/test/.*")
   int getNumberOfCachedRegionLocations(TableName tableName) {
-    if (TableName.isMetaTableName(tableName)) {
-      return metaRegionLocator.getNumberOfCachedRegionLocations();
-    } else {
-      return nonMetaRegionLocator.getNumberOfCachedRegionLocations(tableName);
+    AbstractAsyncTableRegionLocator locator = table2Locator.get(tableName);
+    if (locator == null) {
+      return 0;
     }
+    return locator.getNumberOfCachedRegionLocations();
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
index 4c6cd5a..c34cc5a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
 import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.findException;
 import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.isMetaClearingException;
 import java.util.Arrays;
+import java.util.Optional;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import org.apache.commons.lang3.ObjectUtils;
@@ -30,6 +31,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Objects;
+
 /**
  * Helper class for asynchronous region locator.
  */
@@ -55,9 +58,9 @@ final class AsyncRegionLocatorHelper {
   }
 
   static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception,
-      Function<HRegionLocation, HRegionLocation> cachedLocationSupplier,
-      Consumer<HRegionLocation> addToCache, Consumer<HRegionLocation> removeFromCache,
-      MetricsConnection metrics) {
+    Function<HRegionLocation, HRegionLocation> cachedLocationSupplier,
+    Consumer<HRegionLocation> addToCache, Consumer<HRegionLocation> removeFromCache,
+    Optional<MetricsConnection> metrics) {
     HRegionLocation oldLoc = cachedLocationSupplier.apply(loc);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Try updating {} , the old value is {}, error={}", loc, oldLoc,
@@ -85,9 +88,7 @@ final class AsyncRegionLocatorHelper {
       addToCache.accept(newLoc);
     } else {
       LOG.debug("Try removing {} from cache", loc);
-      if (metrics != null) {
-        metrics.incrCacheDroppingExceptions(exception);
-      }
+      metrics.ifPresent(m -> m.incrCacheDroppingExceptions(exception));
       removeFromCache.accept(loc);
     }
   }
@@ -146,4 +147,33 @@ final class AsyncRegionLocatorHelper {
     HRegionLocation loc = locs.getRegionLocation(replicaId);
     return loc != null && loc.getServerName() != null;
   }
+
+  static boolean isEqual(RegionLocations locs1, RegionLocations locs2) {
+    HRegionLocation[] locArr1 = locs1.getRegionLocations();
+    HRegionLocation[] locArr2 = locs2.getRegionLocations();
+    if (locArr1.length != locArr2.length) {
+      return false;
+    }
+    for (int i = 0; i < locArr1.length; i++) {
+      // do not need to compare region info
+      HRegionLocation loc1 = locArr1[i];
+      HRegionLocation loc2 = locArr2[i];
+      if (loc1 == null) {
+        if (loc2 != null) {
+          return false;
+        }
+      } else {
+        if (loc2 == null) {
+          return false;
+        }
+        if (loc1.getSeqNum() != loc2.getSeqNum()) {
+          return false;
+        }
+        if (!Objects.equal(loc1.getServerName(), loc2.getServerName())) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java
index 96e3ec4..5af3283 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java
@@ -104,8 +104,8 @@ public interface AsyncTableRegionLocator {
   /**
    * Retrieves all of the regions associated with this table.
    * <p/>
-   * Usually we will go to meta table directly in this method so there is no {@code reload}
-   * parameter.
+   * This method always goes directly to the meta table, so there is no {@code reload} parameter.
+   * Use with caution, as it can place significant load on the cluster.
    * <p/>
    * Notice that the location for region replicas other than the default replica are also returned.
    * @return a {@link List} of all regions associated with this table.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
index 35bf0e0..2adf4df 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
@@ -22,7 +22,6 @@ import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
-import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -56,14 +55,8 @@ class AsyncTableRegionLocatorImpl implements AsyncTableRegionLocator {
 
   @Override
   public CompletableFuture<List<HRegionLocation>> getAllRegionLocations() {
-    return tracedFuture(() -> {
-      if (TableName.isMetaTableName(tableName)) {
-        return conn.registry.getMetaRegionLocations()
-          .thenApply(locs -> Arrays.asList(locs.getRegionLocations()));
-      }
-      return ClientMetaTableAccessor
-        .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName);
-    }, getClass().getSimpleName() + ".getAllRegionLocations");
+    return tracedFuture(() -> conn.getLocator().getAllRegionLocations(tableName, true),
+      getClass().getSimpleName() + ".getAllRegionLocations");
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
index cd22d78..569d728 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.Closeable;
 import java.util.concurrent.CompletableFuture;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -32,11 +31,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 interface ConnectionRegistry extends Closeable {
 
   /**
-   * Get the location of meta region(s).
-   */
-  CompletableFuture<RegionLocations> getMetaRegionLocations();
-
-  /**
    * Should only be called once.
    * <p>
    * The upper layer should store this value somewhere as it will not be change any more.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 70312aa..d74e4aa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -172,24 +172,6 @@ public final class ConnectionUtils {
     return Arrays.copyOf(row, row.length + 1);
   }
 
-  /**
-   * Create a row before the specified row and very close to the specified row.
-   */
-  static byte[] createCloseRowBefore(byte[] row) {
-    if (row.length == 0) {
-      return MAX_BYTE_ARRAY;
-    }
-    if (row[row.length - 1] == 0) {
-      return Arrays.copyOf(row, row.length - 1);
-    } else {
-      byte[] nextRow = new byte[row.length + MAX_BYTE_ARRAY.length];
-      System.arraycopy(row, 0, nextRow, 0, row.length - 1);
-      nextRow[row.length - 1] = (byte) ((row[row.length - 1] & 0xFF) - 1);
-      System.arraycopy(MAX_BYTE_ARRAY, 0, nextRow, row.length, MAX_BYTE_ARRAY.length);
-      return nextRow;
-    }
-  }
-
   static boolean isEmptyStartRow(byte[] row) {
     return Bytes.equals(row, EMPTY_START_ROW);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
index 9223935..6caa8d5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
@@ -266,7 +266,7 @@ public class MasterRegistry implements ConnectionRegistry {
     return new RegionLocations(regionLocations);
   }
 
-  @Override
+  // keep the method here just for testing compatibility
   public CompletableFuture<RegionLocations> getMetaRegionLocations() {
     return tracedFuture(
       () -> this
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 1cbcf10..e691bb7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -745,8 +745,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   @Override
   public CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
     if (TableName.isMetaTableName(tableName)) {
-      return connection.registry.getMetaRegionLocations().thenApply(locs -> Stream
-        .of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null));
+      return getTableHRegionLocations(tableName).thenApply(
+        locs -> locs.stream().allMatch(loc -> loc != null && loc.getServerName() != null));
     }
     CompletableFuture<Boolean> future = new CompletableFuture<>();
     addListener(isTableEnabled(tableName), (enabled, error) -> {
@@ -762,7 +762,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
         future.complete(false);
       } else {
         addListener(
-          ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName),
+          ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName, true),
           (locations, error1) -> {
             if (error1 != null) {
               future.completeExceptionally(error1);
@@ -882,15 +882,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<List<RegionInfo>> getRegions(TableName tableName) {
-    if (tableName.equals(META_TABLE_NAME)) {
-      return connection.registry.getMetaRegionLocations()
-        .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion)
-          .collect(Collectors.toList()));
-    } else {
-      return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName)
-        .thenApply(
-          locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList()));
-    }
+    return getTableHRegionLocations(tableName).thenApply(
+      locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList()));
   }
   @Override
   public CompletableFuture<Void> flush(TableName tableName) {
@@ -1129,23 +1122,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
    * List all region locations for the specific table.
    */
   private CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(TableName tableName) {
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
-      addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> {
-        if (err != null) {
-          future.completeExceptionally(err);
-        } else if (metaRegions == null || metaRegions.isEmpty() ||
-          metaRegions.getDefaultRegionLocation() == null) {
-          future.completeExceptionally(new IOException("meta region does not found"));
-        } else {
-          future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation()));
-        }
-      });
-      return future;
-    } else {
-      // For non-meta table, we fetch all locations by scanning hbase:meta table
-      return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName);
-    }
+    return connection.getRegionLocator(tableName).getAllRegionLocations();
   }
 
   /**
@@ -2394,9 +2371,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       String encodedName = Bytes.toString(regionNameOrEncodedRegionName);
       if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) {
         // old format encodedName, should be meta region
-        future = connection.registry.getMetaRegionLocations()
-          .thenApply(locs -> Stream.of(locs.getRegionLocations())
-            .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst());
+        future = getTableHRegionLocations(META_TABLE_NAME).thenApply(locs -> locs.stream()
+          .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst());
       } else {
         future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable,
           regionNameOrEncodedRegionName);
@@ -2413,10 +2389,9 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       }
 
       if (regionInfo.isMetaRegion()) {
-        future = connection.registry.getMetaRegionLocations()
-          .thenApply(locs -> Stream.of(locs.getRegionLocations())
-            .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId())
-            .findFirst());
+        future = getTableHRegionLocations(META_TABLE_NAME).thenApply(locs -> locs.stream()
+          .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId())
+          .findFirst());
       } else {
         future =
           ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java
index 950123c..2380fd8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -27,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * <li>{@link #AFTER} locate the region which contains the row after the given row.</li>
  * </ul>
  */
-@InterfaceAudience.Private
-enum RegionLocateType {
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+public enum RegionLocateType {
   BEFORE, CURRENT, AFTER
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java
new file mode 100644
index 0000000..0745796
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableRegionLocationCache.java
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.canUpdateOnError;
+import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isEqual;
+import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.removeRegionLocation;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+
+import com.google.errorprone.annotations.RestrictedApi;
+import java.util.Comparator;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The location cache for regions of a table.
+ */
+@InterfaceAudience.Private
+class TableRegionLocationCache {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TableRegionLocationCache.class);
+
+  private final Optional<MetricsConnection> metrics;
+
+  private final ConcurrentNavigableMap<byte[], RegionLocations> cache;
+
+  TableRegionLocationCache(Comparator<byte[]> comparator, Optional<MetricsConnection> metrics) {
+    this.metrics = metrics;
+    this.cache = new ConcurrentSkipListMap<>(comparator);
+  }
+
+  private void recordCacheHit() {
+    metrics.ifPresent(MetricsConnection::incrMetaCacheHit);
+  }
+
+  private void recordCacheMiss() {
+    metrics.ifPresent(MetricsConnection::incrMetaCacheMiss);
+  }
+
+  private void recordClearRegionCache() {
+    metrics.ifPresent(MetricsConnection::incrMetaCacheNumClearRegion);
+  }
+
+  private RegionLocations locateRow(TableName tableName, byte[] row, int replicaId) {
+    Map.Entry<byte[], RegionLocations> entry = cache.floorEntry(row);
+    if (entry == null) {
+      recordCacheMiss();
+      return null;
+    }
+    RegionLocations locs = entry.getValue();
+    HRegionLocation loc = locs.getRegionLocation(replicaId);
+    if (loc == null) {
+      recordCacheMiss();
+      return null;
+    }
+    byte[] endKey = loc.getRegion().getEndKey();
+    if (isEmptyStopRow(endKey) || Bytes.compareTo(row, endKey) < 0) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
+          Bytes.toStringBinary(row), RegionLocateType.CURRENT, replicaId);
+      }
+      recordCacheHit();
+      return locs;
+    } else {
+      recordCacheMiss();
+      return null;
+    }
+  }
+
+  private RegionLocations locateRowBefore(TableName tableName, byte[] row, int replicaId) {
+    boolean isEmptyStopRow = isEmptyStopRow(row);
+    Map.Entry<byte[], RegionLocations> entry =
+      isEmptyStopRow ? cache.lastEntry() : cache.lowerEntry(row);
+    if (entry == null) {
+      recordCacheMiss();
+      return null;
+    }
+    RegionLocations locs = entry.getValue();
+    HRegionLocation loc = locs.getRegionLocation(replicaId);
+    if (loc == null) {
+      recordCacheMiss();
+      return null;
+    }
+    if (isEmptyStopRow(loc.getRegion().getEndKey()) ||
+      (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName,
+          Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId);
+      }
+      recordCacheHit();
+      return locs;
+    } else {
+      recordCacheMiss();
+      return null;
+    }
+  }
+
+  RegionLocations locate(TableName tableName, byte[] row, int replicaId,
+    RegionLocateType locateType) {
+    return locateType.equals(RegionLocateType.BEFORE) ? locateRowBefore(tableName, row, replicaId) :
+      locateRow(tableName, row, replicaId);
+  }
+
+  // if we successfully add the locations to cache, return the locations, otherwise return the one
+  // which prevents us being added. The upper layer can use this value to complete pending requests.
+  RegionLocations add(RegionLocations locs) {
+    LOG.trace("Try adding {} to cache", locs);
+    byte[] startKey = locs.getRegionLocation().getRegion().getStartKey();
+    for (;;) {
+      RegionLocations oldLocs = cache.putIfAbsent(startKey, locs);
+      if (oldLocs == null) {
+        return locs;
+      }
+      // check whether the regions are the same, this usually happens when table is split/merged, or
+      // deleted and recreated again.
+      RegionInfo region = locs.getRegionLocation().getRegion();
+      RegionInfo oldRegion = oldLocs.getRegionLocation().getRegion();
+      if (region.getEncodedName().equals(oldRegion.getEncodedName())) {
+        RegionLocations mergedLocs = oldLocs.mergeLocations(locs);
+        if (isEqual(mergedLocs, oldLocs)) {
+          // the merged one is the same with the old one, give up
+          LOG.trace("Will not add {} to cache because the old value {}" +
+            " is newer than us or has the same server name." +
+            " Maybe it is updated before we replace it", locs, oldLocs);
+          return oldLocs;
+        }
+        if (cache.replace(startKey, oldLocs, mergedLocs)) {
+          return mergedLocs;
+        }
+      } else {
+        // the region is different, here we trust the one we fetched. This maybe wrong but finally
+        // the upper layer can detect this and trigger removal of the wrong locations
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("The newly fetched region {} is different from the old one {} for row '{}'," +
+            " try replacing the old one...", region, oldRegion, Bytes.toStringBinary(startKey));
+        }
+        if (cache.replace(startKey, oldLocs, locs)) {
+          return locs;
+        }
+      }
+    }
+  }
+
+  // notice that this is not a constant time operation, do not call it on critical path.
+  int size() {
+    return cache.size();
+  }
+
+  void clearCache(ServerName serverName) {
+    for (Map.Entry<byte[], RegionLocations> entry : cache.entrySet()) {
+      byte[] regionName = entry.getKey();
+      RegionLocations locs = entry.getValue();
+      RegionLocations newLocs = locs.removeByServer(serverName);
+      if (locs == newLocs) {
+        continue;
+      }
+      if (newLocs.isEmpty()) {
+        cache.remove(regionName, locs);
+      } else {
+        cache.replace(regionName, locs, newLocs);
+      }
+    }
+  }
+
+  void removeLocationFromCache(HRegionLocation loc) {
+    byte[] startKey = loc.getRegion().getStartKey();
+    for (;;) {
+      RegionLocations oldLocs = cache.get(startKey);
+      if (oldLocs == null) {
+        return;
+      }
+      HRegionLocation oldLoc = oldLocs.getRegionLocation(loc.getRegion().getReplicaId());
+      if (!canUpdateOnError(loc, oldLoc)) {
+        return;
+      }
+      RegionLocations newLocs = removeRegionLocation(oldLocs, loc.getRegion().getReplicaId());
+      if (newLocs == null) {
+        if (cache.remove(startKey, oldLocs)) {
+          recordClearRegionCache();
+          return;
+        }
+      } else {
+        if (cache.replace(startKey, oldLocs, newLocs)) {
+          recordClearRegionCache();
+          return;
+        }
+      }
+    }
+  }
+
+  RegionLocations get(byte[] key) {
+    return cache.get(key);
+  }
+
+  // only used for testing whether we have cached the location for a table.
+  @RestrictedApi(explanation = "Should only be called in AbstractAsyncTableRegionLocator",
+    link = "", allowedOnPath = ".*/AbstractAsyncTableRegionLocator.java")
+  int getNumberOfCachedRegionLocations() {
+    return cache.values().stream().mapToInt(RegionLocations::numNonNullElements).sum();
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
index 3918dbc..bf93776 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
@@ -115,7 +115,7 @@ class ZKConnectionRegistry implements ConnectionRegistry {
   }
 
   private static void tryComplete(MutableInt remaining, HRegionLocation[] locs,
-      CompletableFuture<RegionLocations> future) {
+    CompletableFuture<RegionLocations> future) {
     remaining.decrement();
     if (remaining.intValue() > 0) {
       return;
@@ -123,8 +123,8 @@ class ZKConnectionRegistry implements ConnectionRegistry {
     future.complete(new RegionLocations(locs));
   }
 
-  private Pair<RegionState.State, ServerName> getStateAndServerName(
-      ZooKeeperProtos.MetaRegionServer proto) {
+  private Pair<RegionState.State, ServerName>
+    getStateAndServerName(ZooKeeperProtos.MetaRegionServer proto) {
     RegionState.State state;
     if (proto.hasState()) {
       state = RegionState.State.convert(proto.getState());
@@ -137,7 +137,7 @@ class ZKConnectionRegistry implements ConnectionRegistry {
   }
 
   private void getMetaRegionLocation(CompletableFuture<RegionLocations> future,
-      List<String> metaReplicaZNodes) {
+    List<String> metaReplicaZNodes) {
     if (metaReplicaZNodes.isEmpty()) {
       future.completeExceptionally(new IOException("No meta znode available"));
     }
@@ -193,7 +193,7 @@ class ZKConnectionRegistry implements ConnectionRegistry {
     }
   }
 
-  @Override
+  // keep the method here just for testing compatibility
   public CompletableFuture<RegionLocations> getMetaRegionLocations() {
     return tracedFuture(() -> {
       CompletableFuture<RegionLocations> future = new CompletableFuture<>();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 329894c..b6918ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLoadStats;
+import org.apache.hadoop.hbase.client.RegionLocateType;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
@@ -3826,6 +3827,7 @@ public final class ProtobufUtil {
       .build();
   }
 
+
   public static HBaseProtos.LogRequest toBalancerRejectionRequest(int limit) {
     MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest =
       MasterProtos.BalancerRejectionsRequest.newBuilder().setLimit(limit).build();
@@ -3835,4 +3837,29 @@ public final class ProtobufUtil {
       .build();
   }
 
+  public static MasterProtos.RegionLocateType toProtoRegionLocateType(RegionLocateType pojo) {
+    switch (pojo) {
+      case BEFORE:
+        return MasterProtos.RegionLocateType.REGION_LOCATE_TYPE_BEFORE;
+      case CURRENT:
+        return MasterProtos.RegionLocateType.REGION_LOCATE_TYPE_CURRENT;
+      case AFTER:
+        return MasterProtos.RegionLocateType.REGION_LOCATE_TYPE_AFTER;
+      default:
+        throw new IllegalArgumentException("Unknown RegionLocateType: " + pojo);
+    }
+  }
+
+  public static RegionLocateType toRegionLocateType(MasterProtos.RegionLocateType proto) {
+    switch (proto) {
+      case REGION_LOCATE_TYPE_BEFORE:
+        return RegionLocateType.BEFORE;
+      case REGION_LOCATE_TYPE_CURRENT:
+        return RegionLocateType.CURRENT;
+      case REGION_LOCATE_TYPE_AFTER:
+        return RegionLocateType.AFTER;
+      default:
+        throw new IllegalArgumentException("Unknown proto RegionLocateType: " + proto);
+    }
+  }
 }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
index 4bd66877..64ded7f 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client;
 
 import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -33,11 +32,6 @@ class DoNothingConnectionRegistry implements ConnectionRegistry {
   }
 
   @Override
-  public CompletableFuture<RegionLocations> getMetaRegionLocations() {
-    return CompletableFuture.completedFuture(null);
-  }
-
-  @Override
   public CompletableFuture<String> getClusterId() {
     return CompletableFuture.completedFuture(null);
   }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java
deleted file mode 100644
index b306500..0000000
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.FutureUtils;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ ClientTests.class, SmallTests.class })
-public class TestAsyncMetaRegionLocatorFailFast {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestAsyncMetaRegionLocatorFailFast.class);
-
-  private static Configuration CONF = HBaseConfiguration.create();
-
-  private static AsyncMetaRegionLocator LOCATOR;
-
-  private static final class FaultyConnectionRegistry extends DoNothingConnectionRegistry {
-
-    public FaultyConnectionRegistry(Configuration conf) {
-      super(conf);
-    }
-
-    @Override
-    public CompletableFuture<RegionLocations> getMetaRegionLocations() {
-      return FutureUtils.failedFuture(new DoNotRetryRegionException("inject error"));
-    }
-  }
-
-  @BeforeClass
-  public static void setUp() {
-    LOCATOR = new AsyncMetaRegionLocator(new FaultyConnectionRegistry(CONF));
-  }
-
-  @Test(expected = DoNotRetryIOException.class)
-  public void test() throws IOException {
-    FutureUtils.get(LOCATOR.getRegionLocations(RegionInfo.DEFAULT_REPLICA_ID, false));
-  }
-}
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
index 15b00f6..180d294 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
@@ -23,6 +23,7 @@ import io.opentelemetry.api.trace.StatusCode;
 import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule;
 import io.opentelemetry.sdk.trace.data.SpanData;
 import java.io.IOException;
+import java.net.SocketAddress;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
@@ -35,6 +36,9 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.RpcClientFactory;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -42,12 +46,22 @@ import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
+import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
+import org.apache.hbase.thirdparty.com.google.protobuf.Message;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionResponse;
 
 @Category({ ClientTests.class, MediumTests.class })
 public class TestAsyncRegionLocatorTracing {
@@ -60,14 +74,63 @@ public class TestAsyncRegionLocatorTracing {
 
   private AsyncConnectionImpl conn;
 
-  private RegionLocations locs;
+  private static RegionLocations locs;
 
   @Rule
   public OpenTelemetryRule traceRule = OpenTelemetryRule.create();
 
+  public static final class RpcClientForTest implements RpcClient {
+
+    public RpcClientForTest(Configuration configuration, String clusterId,
+      SocketAddress localAddress, MetricsConnection metrics) {
+    }
+
+    @Override
+    public BlockingRpcChannel createBlockingRpcChannel(ServerName sn, User user, int rpcTimeout) {
+      throw new UnsupportedOperationException("should not be called");
+    }
+
+    @Override
+    public RpcChannel createRpcChannel(ServerName sn, User user, int rpcTimeout) {
+      return new RpcChannel() {
+
+        @Override
+        public void callMethod(MethodDescriptor method, RpcController controller, Message request,
+          Message responsePrototype, RpcCallback<Message> done) {
+          LocateMetaRegionResponse.Builder builder = LocateMetaRegionResponse.newBuilder();
+          for (HRegionLocation loc : locs) {
+            if (loc != null) {
+              builder.addMetaLocations(ProtobufUtil.toRegionLocation(loc));
+            }
+          }
+          done.run(builder.build());
+        }
+      };
+    }
+
+    @Override
+    public void cancelConnections(ServerName sn) {
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    public boolean hasCellBlockSupport() {
+      return false;
+    }
+  }
+
+  @BeforeClass
+  public static void setUpBeforeClass() {
+    CONF.setClass(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, RpcClientForTest.class,
+      RpcClient.class);
+  }
+
   @Before
   public void setUp() throws IOException {
-    RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+    RegionInfo metaRegionInfo = RegionInfoBuilder.FIRST_META_REGIONINFO;
     locs = new RegionLocations(
       new HRegionLocation(metaRegionInfo,
         ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())),
@@ -78,8 +141,9 @@ public class TestAsyncRegionLocatorTracing {
     conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF) {
 
       @Override
-      public CompletableFuture<RegionLocations> getMetaRegionLocations() {
-        return CompletableFuture.completedFuture(locs);
+      public CompletableFuture<ServerName> getActiveMaster() {
+        return CompletableFuture.completedFuture(
+          ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime()));
       }
     }, "test", null, UserProvider.instantiate(CONF).getCurrent());
   }
@@ -104,8 +168,7 @@ public class TestAsyncRegionLocatorTracing {
 
   @Test
   public void testClearCacheServerName() {
-    ServerName sn = ServerName.valueOf("127.0.0.1", 12345,
-      EnvironmentEdgeManager.currentTime());
+    ServerName sn = ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime());
     conn.getLocator().clearCache(sn);
     SpanData span = waitSpan("AsyncRegionLocator.clearCache");
     assertEquals(StatusCode.OK, span.getStatus().getStatusCode());
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java
index 5adb1e8..783aed6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaCellComparator.java
@@ -76,7 +76,7 @@ public class MetaCellComparator extends CellComparatorImpl {
     return ignoreSequenceid ? diff : Longs.compare(b.getSequenceId(), a.getSequenceId());
   }
 
-  private static int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset,
+  public static int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset,
       int rlength) {
     int leftDelimiter = Bytes.searchDelimiterIndex(left, loffset, llength, HConstants.DELIMITER);
     int rightDelimiter = Bytes.searchDelimiterIndex(right, roffset, rlength, HConstants.DELIMITER);
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
index 3d265dd..5302d51 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
@@ -1331,6 +1331,29 @@ message GetMetaRegionLocationsResponse {
   repeated RegionLocation meta_locations = 1;
 }
 
+enum RegionLocateType {
+  REGION_LOCATE_TYPE_BEFORE = 1;
+  REGION_LOCATE_TYPE_CURRENT = 2;
+  REGION_LOCATE_TYPE_AFTER = 3;
+}
+
+message LocateMetaRegionRequest {
+  required bytes row = 1;
+  required RegionLocateType locate_type = 2;
+}
+
+message LocateMetaRegionResponse {
+  repeated RegionLocation meta_locations = 1;
+}
+
+message GetAllMetaRegionLocationsRequest {
+  required bool exclude_offlined_split_parents = 1;
+}
+
+message GetAllMetaRegionLocationsResponse {
+  repeated RegionLocation meta_locations = 1;
+}
+
 /**
  * Implements all the RPCs needed by clients to look up cluster meta information needed for
  * connection establishment.
@@ -1356,4 +1379,16 @@ service ClientMetaService {
    * Get current meta replicas' region locations.
    */
   rpc GetMetaRegionLocations(GetMetaRegionLocationsRequest) returns(GetMetaRegionLocationsResponse);
+
+  /**
+   * Get meta region locations for a given row
+   */
+  rpc LocateMetaRegion(LocateMetaRegionRequest)
+    returns(LocateMetaRegionResponse);
+
+  /**
+   * Get all meta regions locations
+   */
+  rpc GetAllMetaRegionLocations(GetAllMetaRegionLocationsRequest)
+    returns(GetAllMetaRegionLocationsResponse);
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index ac35caa..8ca8972 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -24,6 +24,7 @@ import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
@@ -31,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionLocateType;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.RegionPlan;
@@ -1763,7 +1765,7 @@ public interface MasterObserver {
       throws IOException {
   }
 
-  /*
+  /**
    * Called before checking if user has permissions.
    * @param ctx the coprocessor instance's environment
    * @param userName the user name
@@ -1782,4 +1784,44 @@ public interface MasterObserver {
   default void postHasUserPermissions(ObserverContext<MasterCoprocessorEnvironment> ctx,
       String userName, List<Permission> permissions) throws IOException {
   }
+
+  /**
+   * Called before locating meta region.
+   * @param ctx the coprocessor instance's environment
+   * @param row the row key to locate
+   * @param locateType the direction of the locate operation
+   */
+  default void preLocateMetaRegion(ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] row,
+    RegionLocateType locateType) throws IOException {
+  }
+
+  /**
+   * Called after locating meta region.
+   * @param ctx the coprocessor instance's environment
+   * @param row the row key to locate
+   * @param locateType the direction of the locate operation
+   * @param locs the locations of the given meta region, including meta replicas if any.
+   */
+  default void postLocateMetaRegion(ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] row,
+    RegionLocateType locateType, List<HRegionLocation> locs) throws IOException {
+  }
+
+  /**
+   * Called before getting all locations for meta regions.
+   * @param ctx the coprocessor instance's environment
+   * @param excludeOfflinedSplitParents don't return split parents
+   */
+  default void preGetAllMetaRegionLocations(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    boolean excludeOfflinedSplitParents) {
+  }
+
+  /**
+   * Called after getting all locations for meta regions.
+   * @param ctx the coprocessor instance's environment
+   * @param excludeOfflinedSplitParents don't return split parents
+   * @param locs the locations of all meta regions, including meta replicas if any.
+   */
+  default void postGetAllMetaRegionLocations(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    boolean excludeOfflinedSplitParents, List<HRegionLocation> locs) {
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 961c929..171966f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED
 import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
 import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
 import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -58,7 +59,6 @@ import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
-import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
@@ -67,12 +67,14 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.PleaseRestartMasterException;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.ServerMetrics;
@@ -88,12 +90,15 @@ import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocateType;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.exceptions.MasterStoppedException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
@@ -221,7 +226,6 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
 import org.apache.hadoop.hbase.zookeeper.SnapshotCleanupTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
@@ -232,6 +236,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -242,6 +247,8 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
 import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@@ -773,6 +780,27 @@ public class HMaster extends HRegionServer implements MasterServices {
     return new AssignmentManager(master, masterRegion);
   }
 
+  /**
+   * Load the meta region state from the meta region server ZNode.
+   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
+   * @param replicaId the ID of the replica
+   * @return the {@link RegionState} for the given meta replica, or null if the state could
+   *         not be read or parsed (e.g. the read was interrupted)
+   * @throws KeeperException if a ZooKeeper operation fails
+   */
+  private static RegionState getMetaRegionState(ZKWatcher zkw, int replicaId)
+    throws KeeperException {
+    RegionState regionState = null;
+    try {
+      byte[] data = ZKUtil.getData(zkw, zkw.getZNodePaths().getZNodeForReplica(replicaId));
+      regionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId);
+    } catch (DeserializationException e) {
+      throw ZKUtil.convert(e);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+    }
+    return regionState;
+  }
+
   private void tryMigrateRootTableFromZooKeeper() throws IOException, KeeperException {
     // try migrate data from zookeeper
     try (RegionScanner scanner =
@@ -794,7 +822,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     StringBuilder info = new StringBuilder("Migrating meta location:");
     for (String metaReplicaNode : metaReplicaNodes) {
       int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaNode);
-      RegionState state = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
+      RegionState state = getMetaRegionState(zooKeeper, replicaId);
       info.append(" ").append(state);
       put.setTimestamp(state.getStamp());
       MetaTableAccessor.addRegionInfo(put, state.getRegion());
@@ -3922,4 +3950,85 @@ public class HMaster extends HRegionServer implements MasterServices {
   public MetaLocationSyncer getMetaLocationSyncer() {
     return metaLocationSyncer;
   }
+
+  public RegionLocations locateMeta(byte[] row, RegionLocateType locateType) throws IOException {
+    if (locateType == RegionLocateType.AFTER) {
+      // Since we know the exact row that immediately follows the given row, we can construct
+      // that row and reuse the CURRENT locate algorithm to find it.
+      row = Arrays.copyOf(row, row.length + 1);
+      locateType = RegionLocateType.CURRENT;
+    }
+    Scan scan =
+      CatalogFamilyFormat.createRegionLocateScan(TableName.META_TABLE_NAME, row, locateType, 1);
+    try (RegionScanner scanner = masterRegion.getScanner(scan)) {
+      boolean moreRows;
+      List<Cell> cells = new ArrayList<>();
+      do {
+        moreRows = scanner.next(cells);
+        if (cells.isEmpty()) {
+          continue;
+        }
+        Result result = Result.create(cells);
+        cells.clear();
+        RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
+        if (locs == null || locs.getDefaultRegionLocation() == null) {
+          LOG.warn("No location found when locating meta region with row='{}', locateType={}",
+            Bytes.toStringBinary(row), locateType);
+          return null;
+        }
+        HRegionLocation loc = locs.getDefaultRegionLocation();
+        RegionInfo info = loc.getRegion();
+        if (info == null) {
+          LOG.warn("HRegionInfo is null when locating meta region with row='{}', locateType={}",
+            Bytes.toStringBinary(row), locateType);
+          return null;
+        }
+        if (info.isSplitParent()) {
+          continue;
+        }
+        return locs;
+      } while (moreRows);
+      LOG.warn("No location available when locating meta region with row='{}', locateType={}",
+        Bytes.toStringBinary(row), locateType);
+      return null;
+    }
+  }
+
+  public List<RegionLocations> getAllMetaRegionLocations(boolean excludeOfflinedSplitParents)
+    throws IOException {
+    Scan scan = new Scan().addFamily(HConstants.CATALOG_FAMILY);
+    List<RegionLocations> list = new ArrayList<>();
+    try (RegionScanner scanner = masterRegion.getScanner(scan)) {
+      boolean moreRows;
+      List<Cell> cells = new ArrayList<>();
+      do {
+        moreRows = scanner.next(cells);
+        if (cells.isEmpty()) {
+          continue;
+        }
+        Result result = Result.create(cells);
+        cells.clear();
+        RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
+        if (locs == null) {
+          LOG.warn("No locations in {}", result);
+          continue;
+        }
+        HRegionLocation loc = locs.getRegionLocation();
+        if (loc == null) {
+          LOG.warn("No non null location in {}", result);
+          continue;
+        }
+        RegionInfo info = loc.getRegion();
+        if (info == null) {
+          LOG.warn("No serialized RegionInfo in {}", result);
+          continue;
+        }
+        if (excludeOfflinedSplitParents && info.isSplitParent()) {
+          continue;
+        }
+        list.add(locs);
+      } while (moreRows);
+    }
+    return list;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 01d1a62..728da5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionLocateType;
 import org.apache.hadoop.hbase.client.SharedConnection;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -2038,4 +2040,42 @@ public class MasterCoprocessorHost
       }
     });
   }
+
+  public void preLocateMetaRegion(byte[] row, RegionLocateType locateType) throws IOException {
+    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+      @Override
+      public void call(MasterObserver observer) throws IOException {
+        observer.preLocateMetaRegion(this, row, locateType);
+      }
+    });
+  }
+
+  public void postLocateMetaRegion(byte[] row, RegionLocateType locateType,
+    List<HRegionLocation> locs) throws IOException {
+    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+      @Override
+      public void call(MasterObserver observer) throws IOException {
+        observer.postLocateMetaRegion(this, row, locateType, locs);
+      }
+    });
+  }
+
+  public void preGetAllMetaRegionLocations(boolean excludeOfflinedSplitParents) throws IOException {
+    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+      @Override
+      public void call(MasterObserver observer) throws IOException {
+        observer.preGetAllMetaRegionLocations(this, excludeOfflinedSplitParents);
+      }
+    });
+  }
+
+  public void postGetAllMetaRegionLocations(boolean excludeOfflinedSplitParents,
+    List<HRegionLocation> locs) throws IOException {
+    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+      @Override
+      public void call(MasterObserver observer) throws IOException {
+        observer.postGetAllMetaRegionLocations(this, excludeOfflinedSplitParents, locs);
+      }
+    });
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index e7bf96d..81ea0c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
@@ -54,7 +55,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocateType;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
@@ -75,7 +76,6 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
-import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.master.janitor.MetaFixer;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -125,7 +125,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -210,6 +209,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaReq
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FixMetaResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetActiveMasterResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterIdResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
@@ -268,6 +269,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableD
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
@@ -408,10 +411,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.
  */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
-public class MasterRpcServices extends RSRpcServices implements
-    MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
-    LockService.BlockingInterface, HbckService.BlockingInterface,
-    ClientMetaService.BlockingInterface {
+public class MasterRpcServices extends RSRpcServices implements MasterService.BlockingInterface,
+  RegionServerStatusService.BlockingInterface, LockService.BlockingInterface,
+  HbckService.BlockingInterface, ClientMetaService.BlockingInterface {
 
   private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
   private static final Logger AUDITLOG =
@@ -545,18 +547,17 @@ public class MasterRpcServices extends RSRpcServices implements
   @Override
   protected List<BlockingServiceAndInterface> getServices() {
     List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
-    bssi.add(new BlockingServiceAndInterface(
-        MasterService.newReflectiveBlockingService(this),
-        MasterService.BlockingInterface.class));
-    bssi.add(new BlockingServiceAndInterface(
-        RegionServerStatusService.newReflectiveBlockingService(this),
+    bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this),
+      MasterService.BlockingInterface.class));
+    bssi.add(
+      new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this),
         RegionServerStatusService.BlockingInterface.class));
     bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
-        LockService.BlockingInterface.class));
+      LockService.BlockingInterface.class));
     bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this),
-        HbckService.BlockingInterface.class));
+      HbckService.BlockingInterface.class));
     bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
-        ClientMetaService.BlockingInterface.class));
+      ClientMetaService.BlockingInterface.class));
     bssi.addAll(super.getServices());
     return bssi;
   }
@@ -1718,39 +1719,31 @@ public class MasterRpcServices extends RSRpcServices implements
   }
 
   @Override
-  public UnassignRegionResponse unassignRegion(RpcController controller,
-      UnassignRegionRequest req) throws ServiceException {
+  public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
+    throws ServiceException {
     try {
-      final byte [] regionName = req.getRegion().getValue().toByteArray();
+      final byte[] regionName = req.getRegion().getValue().toByteArray();
       RegionSpecifierType type = req.getRegion().getType();
       UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();
 
       master.checkInitialized();
       if (type != RegionSpecifierType.REGION_NAME) {
-        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
-          + " actual: " + type);
+        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME +
+          " actual: " + type);
       }
-      Pair<RegionInfo, ServerName> pair =
-        MetaTableAccessor.getRegion(master.getConnection(), regionName);
-      if (Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(), regionName)) {
-        pair = new Pair<>(RegionInfoBuilder.FIRST_META_REGIONINFO,
-          MetaTableLocator.getMetaRegionLocation(master.getZooKeeper()));
-      }
-      if (pair == null) {
-        throw new UnknownRegionException(Bytes.toString(regionName));
+      final RegionInfo regionInfo = master.getAssignmentManager().getRegionInfo(regionName);
+      if (regionInfo == null) {
+        throw new UnknownRegionException(Bytes.toStringBinary(regionName));
       }
-
-      RegionInfo hri = pair.getFirst();
       if (master.cpHost != null) {
-        master.cpHost.preUnassign(hri);
+        master.cpHost.preUnassign(regionInfo);
       }
-      LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
+      LOG.debug(master.getClientIdAuditPrefix() + " unassign " + regionInfo.getRegionNameAsString()
           + " in current location if it is online");
-      master.getAssignmentManager().unassign(hri);
+      master.getAssignmentManager().unassign(regionInfo);
       if (master.cpHost != null) {
-        master.cpHost.postUnassign(hri);
+        master.cpHost.postUnassign(regionInfo);
       }
-
       return urr;
     } catch (IOException ioe) {
       throw new ServiceException(ioe);
@@ -3460,4 +3453,66 @@ public class MasterRpcServices extends RSRpcServices implements
       .addAllBalancerRejection(balancerRejections).build();
   }
 
+  @Override
+  public LocateMetaRegionResponse locateMetaRegion(RpcController controller,
+    LocateMetaRegionRequest request) throws ServiceException {
+    byte[] row = request.getRow().toByteArray();
+    RegionLocateType locateType = ProtobufUtil.toRegionLocateType(request.getLocateType());
+    try {
+      master.checkServiceStarted();
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preLocateMetaRegion(row, locateType);
+      }
+      RegionLocations locs = master.locateMeta(row, locateType);
+      List<HRegionLocation> list = new ArrayList<>();
+      LocateMetaRegionResponse.Builder builder = LocateMetaRegionResponse.newBuilder();
+      if (locs != null) {
+        for (HRegionLocation loc : locs) {
+          if (loc != null) {
+            builder.addMetaLocations(ProtobufUtil.toRegionLocation(loc));
+            list.add(loc);
+          }
+        }
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postLocateMetaRegion(row, locateType, list);
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetAllMetaRegionLocationsResponse getAllMetaRegionLocations(RpcController controller,
+    GetAllMetaRegionLocationsRequest request) throws ServiceException {
+    boolean excludeOfflinedSplitParents = request.getExcludeOfflinedSplitParents();
+    try {
+      master.checkServiceStarted();
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preGetAllMetaRegionLocations(excludeOfflinedSplitParents);
+      }
+      List<RegionLocations> locs = master.getAllMetaRegionLocations(excludeOfflinedSplitParents);
+      List<HRegionLocation> list = new ArrayList<>();
+      GetAllMetaRegionLocationsResponse.Builder builder =
+        GetAllMetaRegionLocationsResponse.newBuilder();
+      if (locs != null) {
+        for (RegionLocations ls : locs) {
+          for (HRegionLocation loc : ls) {
+            if (loc != null) {
+              builder.addMetaLocations(ProtobufUtil.toRegionLocation(loc));
+              list.add(loc);
+            }
+          }
+        }
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postGetAllMetaRegionLocations(excludeOfflinedSplitParents,
+          list);
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index f24ecd4..1cee59f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
 import java.util.List;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -577,4 +578,12 @@ public interface MasterServices extends Server {
    * We need to get this in MTP to tell the syncer the new meta replica count.
    */
   MetaLocationSyncer getMetaLocationSyncer();
+
+  /**
+   * Get locations for all meta regions.
+   * @param excludeOfflinedSplitParents don't return split parents
+   * @return The locations of all the meta regions
+   */
+  List<RegionLocations> getAllMetaRegionLocations(boolean excludeOfflinedSplitParents)
+    throws IOException;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
index 07512d1..b192a67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
@@ -39,11 +39,14 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 /**
- * A cache of meta region location metadata. Registers a listener on ZK to track changes to the
- * meta table znodes. Clients are expected to retry if the meta information is stale. This class
- * is thread-safe (a single instance of this class can be shared by multiple threads without race
+ * A cache of meta region location metadata. Registers a listener on ZK to track changes to the meta
+ * table znodes. Clients are expected to retry if the meta information is stale. This class is
+ * thread-safe (a single instance of this class can be shared by multiple threads without race
  * conditions).
+ * @deprecated Now we store meta location in the local store at master side so we should get the
+ *             meta location from active master instead of zk, keep it here only for compatibility.
  */
+@Deprecated
 @InterfaceAudience.Private
 public class MetaRegionLocationCache extends ZKListener {
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 87c04da..cce7a81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -191,8 +191,8 @@ public class RegionStateStore {
     final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time);
     MetaTableAccessor.addRegionInfo(put, regionInfo);
     final StringBuilder info =
-      new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")
-        .append(regionInfo.getEncodedName()).append(", regionState=").append(state);
+      new StringBuilder("pid=").append(pid).append(" updating catalog row=")
+        .append(regionInfo.getRegionNameAsString()).append(", regionState=").append(state);
     if (openSeqNum >= 0) {
       Preconditions.checkArgument(state == State.OPEN && regionLocation != null,
         "Open region should be on a server");
@@ -228,7 +228,7 @@ public class RegionStateStore {
   }
 
   public void mirrorMetaLocation(RegionInfo regionInfo, ServerName serverName, State state)
-      throws IOException {
+    throws IOException {
     try {
       MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName, regionInfo.getReplicaId(),
         state);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java
index 51790aa..3d00e49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterStatusServlet.java
@@ -27,11 +27,11 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -82,7 +82,8 @@ public class MasterStatusServlet extends HttpServlet {
   }
 
   private ServerName getMetaLocationOrNull(HMaster master) {
-    return MetaTableLocator.getMetaRegionLocation(master.getZooKeeper());
+    // Null-safe: getRegionServerOfRegion returns null when the region state is unknown
+    // (e.g. during master startup), matching this method's "OrNull" contract.
+    return master.getAssignmentManager().getRegionStates()
+      .getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
   }
 
   private Map<String, Integer> getFragmentationInfo(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 2313e70..30c3458 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -368,8 +368,6 @@ public class CreateTableProcedure
     final TableDescriptor tableDescriptor, final List<RegionInfo> regions) throws IOException {
     assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions;
 
-    ProcedureSyncWait.waitMetaRegions(env);
-
     // Add replicas if needed
     // we need to create regions with replicaIds starting from 1
     List<RegionInfo> newRegions =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
index 46621da..b28c95f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
@@ -26,7 +26,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
@@ -34,7 +33,6 @@ import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
@@ -222,18 +220,6 @@ public final class ProcedureSyncWait {
     throw new TimeoutIOException("Timed out while waiting on " + purpose);
   }
 
-  protected static void waitMetaRegions(final MasterProcedureEnv env) throws IOException {
-    int timeout = env.getMasterConfiguration().getInt("hbase.client.catalog.timeout", 10000);
-    try {
-      if (MetaTableLocator.waitMetaRegionLocation(env.getMasterServices().getZooKeeper(),
-        timeout) == null) {
-        throw new NotAllMetaRegionsOnlineException();
-      }
-    } catch (InterruptedException e) {
-      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
-    }
-  }
-
   protected static void waitRegionInTransition(final MasterProcedureEnv env,
       final List<RegionInfo> regions) throws IOException {
     final RegionStates states = env.getAssignmentManager().getRegionStates();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index 23d0263..507058b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -21,8 +21,11 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -36,7 +39,6 @@ import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
@@ -159,7 +161,9 @@ public final class MasterSnapshotVerifier {
   private void verifyRegions(final SnapshotManifest manifest) throws IOException {
     List<RegionInfo> regions;
     if (TableName.META_TABLE_NAME.equals(tableName)) {
-      regions = MetaTableLocator.getMetaRegions(services.getZooKeeper());
+      regions = services.getAllMetaRegionLocations(false).stream()
+        .flatMap(locs -> Stream.of(locs.getRegionLocations())).filter(l -> l != null)
+        .map(HRegionLocation::getRegion).collect(Collectors.toList());
     } else {
       regions = MetaTableAccessor.getTableRegions(services.getConnection(), tableName);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 5ff8a49..108f0fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -22,6 +22,8 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.CancellationException;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -49,7 +51,6 @@ import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -197,12 +198,15 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       monitor.rethrowException();
 
       List<Pair<RegionInfo, ServerName>> regionsAndLocations;
+
       if (TableName.META_TABLE_NAME.equals(snapshotTable)) {
-        regionsAndLocations = MetaTableLocator.getMetaRegionsAndLocations(
-          server.getZooKeeper());
+        regionsAndLocations = master.getAllMetaRegionLocations(false).stream()
+          .flatMap(locs -> Stream.of(locs.getRegionLocations())).filter(l -> l != null)
+          .map(loc -> Pair.newPair(loc.getRegion(), loc.getServerName()))
+          .collect(Collectors.toList());
       } else {
-        regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
-          server.getConnection(), snapshotTable, false);
+        regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(server.getConnection(),
+          snapshotTable, false);
       }
 
       // run the snapshot
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
index c6a3b92..2b28886 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
@@ -24,13 +24,14 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -44,7 +45,6 @@ import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -131,20 +131,16 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager {
     // Each region server will get its own online regions for the table.
     // We may still miss regions that need to be flushed.
     List<Pair<RegionInfo, ServerName>> regionsAndLocations;
-
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      regionsAndLocations = MetaTableLocator.getMetaRegionsAndLocations(
-        master.getZooKeeper());
-    } else {
-      regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
-        master.getConnection(), tableName, false);
+    try (RegionLocator locator =
+      master.getConnection().getRegionLocator(tableName)) {
+      regionsAndLocations = locator.getAllRegionLocations().stream()
+        .map(loc -> Pair.newPair(loc.getRegion(), loc.getServerName()))
+        .collect(Collectors.toList());
     }
 
     Set<String> regionServers = new HashSet<>(regionsAndLocations.size());
     for (Pair<RegionInfo, ServerName> region : regionsAndLocations) {
       if (region != null && region.getFirst() != null && region.getSecond() != null) {
-        RegionInfo hri = region.getFirst();
-        if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) continue;
         regionServers.add(region.getSecond().toString());
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index c00a8b7..ae602a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -120,7 +120,6 @@ import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterRpcServicesVersionWrapper;
-import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.mob.MobFileCache;
 import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
@@ -178,7 +177,6 @@ import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -2416,8 +2414,8 @@ public class HRegionServer extends Thread implements
   }
 
   /**
-   * Helper method for use in tests. Skip the region transition report when there's no master
-   * around to receive it.
+   * Helper method for use in tests. Skip the region transition report when there's no master around
+   * to receive it.
    */
   private boolean skipReportingTransition(final RegionStateTransitionContext context) {
     final TransitionCode code = context.getCode();
@@ -2428,17 +2426,13 @@ public class HRegionServer extends Thread implements
     if (code == TransitionCode.OPENED) {
       Preconditions.checkArgument(hris != null && hris.length == 1);
       if (hris[0].isMetaRegion()) {
-        try {
-          MetaTableLocator.setMetaLocation(getZooKeeper(), serverName,
-              hris[0].getReplicaId(), RegionState.State.OPEN);
-        } catch (KeeperException e) {
-          LOG.info("Failed to update meta location", e);
-          return false;
-        }
+        LOG.warn(
+          "meta table location is stored in master local store, so we can not skip reporting");
+        return false;
       } else {
         try {
           MetaTableAccessor.updateRegionLocation(asyncClusterConnection.toConnection(), hris[0],
-              serverName, openSeqNum, masterSystemTime);
+            serverName, openSeqNum, masterSystemTime);
         } catch (IOException e) {
           LOG.info("Failed to update meta", e);
           return false;
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index f6753df..29913b5 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -68,7 +68,6 @@
 <%@ page import="org.apache.hadoop.hbase.quotas.ThrottleSettings" %>
 <%@ page import="org.apache.hadoop.hbase.util.Bytes" %>
 <%@ page import="org.apache.hadoop.hbase.util.FSUtils" %>
-<%@ page import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator" %>
 <%@ page import="org.apache.hadoop.util.StringUtils" %>
 <%@ page import="org.apache.hbase.thirdparty.com.google.protobuf.ByteString" %>
 <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos" %>
@@ -314,14 +313,9 @@
           for (int j = 0; j < numMetaReplicas; j++) {
             RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
                                     RegionInfoBuilder.FIRST_META_REGIONINFO, j);
-            //If a metaLocation is null, All of its info would be empty here to be displayed.
-            ServerName metaLocation = null;
-            try {
-              metaLocation = MetaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1);
-            } catch (NotAllMetaRegionsOnlineException e) {
-              //Region in transition state here throw a NotAllMetaRegionsOnlineException causes
-              //the UI crash.
-            }
+            RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta);
+            // If a metaLocation is null, All of its info would be empty here to be displayed.
+            ServerName metaLocation = regionState != null ? regionState.getServerName() : null;
             for (int i = 0; i < 1; i++) {
               //If metaLocation is null, default value below would be displayed in UI.
               String hostAndPort = "";
@@ -387,14 +381,9 @@
            for (int j = 0; j < numMetaReplicas; j++) {
              RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
                                      RegionInfoBuilder.FIRST_META_REGIONINFO, j);
-             //If a metaLocation is null, All of its info would be empty here to be displayed.
-             ServerName metaLocation = null;
-             try {
-               metaLocation = MetaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1);
-             } catch (NotAllMetaRegionsOnlineException e) {
-               //Region in transition state here throw a NotAllMetaRegionsOnlineException causes
-               //the UI crash.
-             }
+             RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta);
+             // If a metaLocation is null, All of its info would be empty here to be displayed.
+             ServerName metaLocation = regionState != null ? regionState.getServerName() : null;
              for (int i = 0; i < 1; i++) {
                //If metaLocation is null, default value below would be displayed in UI.
                String hostAndPort = "";
@@ -443,14 +432,9 @@
           for (int j = 0; j < numMetaReplicas; j++) {
             RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
                                     RegionInfoBuilder.FIRST_META_REGIONINFO, j);
-            //If a metaLocation is null, All of its info would be empty here to be displayed.
-            ServerName metaLocation = null;
-            try {
-              metaLocation = MetaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1);
-            } catch (NotAllMetaRegionsOnlineException e) {
-              //Region in transition state here throw a NotAllMetaRegionsOnlineException causes
-              //the UI crash.
-            }
+            RegionState regionState = master.getAssignmentManager().getRegionStates().getRegionState(meta);
+            // If a metaLocation is null, All of its info would be empty here to be displayed.
+            ServerName metaLocation = regionState != null ? regionState.getServerName() : null;
             for (int i = 0; i < 1; i++) {
               //If metaLocation is null, default value below would be displayed in UI.
               String hostAndPort = "";
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
index 922da6f..9169c22 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -243,13 +242,6 @@ public class TestMetaTableAccessor {
   }
 
   @Test
-  public void testGetRegionsFromMetaTable() throws IOException, InterruptedException {
-    List<RegionInfo> regions = MetaTableLocator.getMetaRegions(UTIL.getZooKeeperWatcher());
-    assertTrue(regions.size() >= 1);
-    assertTrue(MetaTableLocator.getMetaRegionsAndLocations(UTIL.getZooKeeperWatcher()).size() >= 1);
-  }
-
-  @Test
   public void testGetRegion() throws IOException, InterruptedException {
     final String name = this.name.getMethodName();
     LOG.info("Started " + name);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
deleted file mode 100644
index 3bea0a7..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
-
-/**
- * Test {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}
- */
-@Category({ MiscTests.class, MediumTests.class })
-public class TestMetaTableLocator {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestMetaTableLocator.class);
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableLocator.class);
-  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
-  private static final ServerName SN =
-    ServerName.valueOf("example.org", 1234, EnvironmentEdgeManager.currentTime());
-  private ZKWatcher watcher;
-  private Abortable abortable;
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    // Set this down so tests run quicker
-    UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
-    UTIL.startMiniZKCluster();
-  }
-
-  @AfterClass
-  public static void afterClass() throws IOException {
-    UTIL.getZkCluster().shutdown();
-  }
-
-  @Before
-  public void before() throws IOException {
-    this.abortable = new Abortable() {
-      @Override
-      public void abort(String why, Throwable e) {
-        LOG.info(why, e);
-      }
-
-      @Override
-      public boolean isAborted() {
-        return false;
-      }
-    };
-    this.watcher =
-      new ZKWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), this.abortable, true);
-  }
-
-  @After
-  public void after() {
-    try {
-      // Clean out meta location or later tests will be confused... they presume
-      // start fresh in zk.
-      MetaTableLocator.deleteMetaLocation(this.watcher);
-    } catch (KeeperException e) {
-      LOG.warn("Unable to delete hbase:meta location", e);
-    }
-
-    this.watcher.close();
-  }
-
-  /**
-   * Test normal operations
-   */
-  @Test
-  public void testMetaLookup()
-      throws IOException, InterruptedException, ServiceException, KeeperException {
-    final ClientProtos.ClientService.BlockingInterface client =
-      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
-
-    Mockito.when(client.get((RpcController) Mockito.any(), (GetRequest) Mockito.any()))
-      .thenReturn(GetResponse.newBuilder().build());
-
-    assertNull(MetaTableLocator.getMetaRegionLocation(this.watcher));
-    for (RegionState.State state : RegionState.State.values()) {
-      if (state.equals(RegionState.State.OPEN)) {
-        continue;
-      }
-      MetaTableLocator.setMetaLocation(this.watcher, SN, state);
-      assertNull(MetaTableLocator.getMetaRegionLocation(this.watcher));
-      assertEquals(state, MetaTableLocator.getMetaRegionState(this.watcher).getState());
-    }
-    MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN);
-    assertEquals(SN, MetaTableLocator.getMetaRegionLocation(this.watcher));
-    assertEquals(RegionState.State.OPEN,
-      MetaTableLocator.getMetaRegionState(this.watcher).getState());
-
-    MetaTableLocator.deleteMetaLocation(this.watcher);
-    assertNull(MetaTableLocator.getMetaRegionState(this.watcher).getServerName());
-    assertEquals(RegionState.State.OFFLINE,
-      MetaTableLocator.getMetaRegionState(this.watcher).getState());
-    assertNull(MetaTableLocator.getMetaRegionLocation(this.watcher));
-  }
-
-  @Test(expected = NotAllMetaRegionsOnlineException.class)
-  public void testTimeoutWaitForMeta() throws IOException, InterruptedException {
-    MetaTableLocator.waitMetaRegionLocation(watcher, 100);
-  }
-
-  /**
-   * Test waiting on meat w/ no timeout specified.
-   */
-  @Test
-  public void testNoTimeoutWaitForMeta() throws IOException, InterruptedException, KeeperException {
-    ServerName hsa = MetaTableLocator.getMetaRegionLocation(watcher);
-    assertNull(hsa);
-
-    // Now test waiting on meta location getting set.
-    Thread t = new WaitOnMetaThread();
-    startWaitAliveThenWaitItLives(t, 1);
-    // Set a meta location.
-    MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN);
-    hsa = SN;
-    // Join the thread... should exit shortly.
-    t.join();
-    // Now meta is available.
-    assertTrue(MetaTableLocator.getMetaRegionLocation(watcher).equals(hsa));
-  }
-
-  private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {
-    t.start();
-    UTIL.waitFor(2000, t::isAlive);
-    // Wait one second.
-    Threads.sleep(ms);
-    assertTrue("Assert " + t.getName() + " still waiting", t.isAlive());
-  }
-
-  /**
-   * Wait on META.
-   */
-  class WaitOnMetaThread extends Thread {
-
-    WaitOnMetaThread() {
-      super("WaitOnMeta");
-    }
-
-    @Override
-    public void run() {
-      try {
-        doWaiting();
-      } catch (InterruptedException e) {
-        throw new RuntimeException("Failed wait", e);
-      }
-      LOG.info("Exiting " + getName());
-    }
-
-    void doWaiting() throws InterruptedException {
-      try {
-        for (;;) {
-          if (MetaTableLocator.waitMetaRegionLocation(watcher, 10000) != null) {
-            break;
-          }
-        }
-      } catch (NotAllMetaRegionsOnlineException e) {
-        // Ignore
-      }
-    }
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java
index f14faf7..277f7bf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java
@@ -58,10 +58,7 @@ public abstract class AbstractTestRegionLocator {
     }
     UTIL.getAdmin().createTable(td, SPLIT_KEYS);
     UTIL.waitTableAvailable(TABLE_NAME);
-    try (ConnectionRegistry registry =
-      ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration())) {
-      RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry);
-    }
+    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL);
     UTIL.getAdmin().balancerSwitch(false, true);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java
index c9d67f4..ea1122c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyConnectionRegistry.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client;
 
 import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 
 /**
@@ -32,11 +31,6 @@ public class DummyConnectionRegistry implements ConnectionRegistry {
       HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY;
 
   @Override
-  public CompletableFuture<RegionLocations> getMetaRegionLocations() {
-    return null;
-  }
-
-  @Override
   public CompletableFuture<String> getClusterId() {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
index d34b419..3b0fbe8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
@@ -26,6 +26,7 @@ import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
 import org.apache.hadoop.hbase.TableName;
@@ -34,7 +35,6 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.junit.AfterClass;
 import org.junit.Rule;
 import org.slf4j.Logger;
@@ -65,8 +65,11 @@ public class MetaWithReplicasTestBase {
     HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3);
     AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
     Set<ServerName> sns = new HashSet<ServerName>();
-    ServerName hbaseMetaServerName =
-      MetaTableLocator.getMetaRegionLocation(TEST_UTIL.getZooKeeperWatcher());
+    ServerName hbaseMetaServerName;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+      hbaseMetaServerName = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getServerName();
+    }
     LOG.info("HBASE:META DEPLOY: on " + hbaseMetaServerName);
     sns.add(hbaseMetaServerName);
     for (int replicaId = 1; replicaId < 3; replicaId++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java
index abb0c11..05a1051 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java
@@ -43,8 +43,7 @@ public final class RegionReplicaTestHelper {
   }
 
   // waits for all replicas to have region location
-  static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtil util,
-    ConnectionRegistry registry) throws IOException {
+  static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtil util) throws IOException {
     Configuration conf = util.getConfiguration();
     int regionReplicaCount =
       util.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication();
@@ -58,16 +57,20 @@ public final class RegionReplicaTestHelper {
         @Override
         public boolean evaluate() {
           try {
-            RegionLocations locs = registry.getMetaRegionLocations().get();
+            List<HRegionLocation> locs;
+            try (RegionLocator locator =
+              util.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+              locs = locator.getAllRegionLocations();
+            }
             if (locs.size() < regionReplicaCount) {
               return false;
             }
             for (int i = 0; i < regionReplicaCount; i++) {
-              HRegionLocation loc = locs.getRegionLocation(i);
+              HRegionLocation loc = locs.get(i);
               // Wait until the replica is served by a region server. There could be delay between
               // the replica being available to the connection and region server opening it.
               Optional<ServerName> rsCarryingReplica =
-                  getRSCarryingReplica(util, loc.getRegion().getTable(), i);
+                getRSCarryingReplica(util, loc.getRegion().getTable(), i);
               if (!rsCarryingReplica.isPresent()) {
                 return false;
               }
@@ -120,7 +123,7 @@ public final class RegionReplicaTestHelper {
 
   interface Locator {
     RegionLocations getRegionLocations(TableName tableName, int replicaId, boolean reload)
-        throws Exception;
+      throws Exception;
 
     void updateCachedLocationOnError(HRegionLocation loc, Throwable error) throws Exception;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java
index 81dafae..13e13bc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java
@@ -54,10 +54,7 @@ public class TestAsyncAdminWithRegionReplicas extends TestAsyncAdminBase {
   public static void setUpBeforeClass() throws Exception {
     TestAsyncAdminBase.setUpBeforeClass();
     HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
-    try (ConnectionRegistry registry =
-      ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration())) {
-      RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry);
-    }
+    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
   }
 
   private void testMoveNonDefaultReplica(TableName tableName)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java
index 480d797..6a06f32 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
 import static org.apache.hadoop.hbase.client.RegionReplicaTestHelper.testLocator;
 
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -44,24 +45,25 @@ public class TestAsyncMetaRegionLocator {
 
   private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
 
-  private static ConnectionRegistry REGISTRY;
+  private static AsyncConnectionImpl CONN;
 
-  private static AsyncMetaRegionLocator LOCATOR;
+  private static AsyncRegionLocator LOCATOR;
 
   @BeforeClass
   public static void setUp() throws Exception {
     TEST_UTIL.startMiniCluster(3);
     HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
     TEST_UTIL.waitUntilNoRegionsInTransition();
-    REGISTRY = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
-    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY);
+    CONN = (AsyncConnectionImpl) ConnectionFactory
+      .createAsyncConnection(TEST_UTIL.getConfiguration()).get();
+    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
     TEST_UTIL.getAdmin().balancerSwitch(false, true);
-    LOCATOR = new AsyncMetaRegionLocator(REGISTRY);
+    LOCATOR = new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER);
   }
 
   @AfterClass
   public static void tearDown() throws Exception {
-    Closeables.close(REGISTRY, true);
+    Closeables.close(CONN, true);
     TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -71,14 +73,15 @@ public class TestAsyncMetaRegionLocator {
 
       @Override
       public void updateCachedLocationOnError(HRegionLocation loc, Throwable error)
-          throws Exception {
+        throws Exception {
         LOCATOR.updateCachedLocationOnError(loc, error);
       }
 
       @Override
       public RegionLocations getRegionLocations(TableName tableName, int replicaId, boolean reload)
-          throws Exception {
-        return LOCATOR.getRegionLocations(replicaId, reload).get();
+        throws Exception {
+        return LOCATOR.getRegionLocations(tableName, EMPTY_START_ROW, replicaId,
+          RegionLocateType.CURRENT, reload).get();
       }
     });
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
index 576238c..42a2994 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
@@ -78,12 +78,13 @@ public class TestAsyncNonMetaRegionLocator {
 
   private static byte[] FAMILY = Bytes.toBytes("cf");
   private static final int NB_SERVERS = 4;
+
   private static final int NUM_OF_META_REPLICA = NB_SERVERS - 1;
 
   private static byte[][] SPLIT_KEYS;
 
   private AsyncConnectionImpl conn;
-  private AsyncNonMetaRegionLocator locator;
+  private AsyncRegionLocator locator;
 
   @Parameter
   public CatalogReplicaMode metaReplicaMode;
@@ -124,7 +125,7 @@ public class TestAsyncNonMetaRegionLocator {
       ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     conn =
       new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), null, User.getCurrent());
-    locator = new AsyncNonMetaRegionLocator(conn);
+    locator = new AsyncRegionLocator(conn, AsyncConnectionImpl.RETRY_TIMER);
   }
 
   @After
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java
index c9d47dc..7fadef3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java
@@ -51,8 +51,8 @@ import org.junit.runners.Parameterized;
 
 /**
  * Class to test asynchronous region admin operations.
- * @see TestAsyncRegionAdminApi This test and it used to be joined it was taking longer than our
- * ten minute timeout so they were split.
+ * @see TestAsyncRegionAdminApi This test and it used to be joined it was taking longer than our ten
+ *      minute timeout so they were split.
  */
 @RunWith(Parameterized.class)
 @Category({ LargeTests.class, ClientTests.class })
@@ -60,7 +60,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAsyncRegionAdminApi2.class);
+    HBaseClassTestRule.forClass(TestAsyncRegionAdminApi2.class);
 
   @Test
   public void testGetRegionLocation() throws Exception {
@@ -79,13 +79,13 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
   @Test
   public void testSplitSwitch() throws Exception {
     createTableWithDefaultConf(tableName);
-    byte[][] families = {FAMILY};
+    byte[][] families = { FAMILY };
     final int rows = 10000;
     TestAsyncRegionAdminApi.loadData(tableName, families, rows);
 
     AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
     List<HRegionLocation> regionLocations =
-        ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
+      ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName, true).get();
     int originalCount = regionLocations.size();
 
     initSplitMergeSwitch();
@@ -93,7 +93,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
     try {
       admin.split(tableName, Bytes.toBytes(rows / 2)).join();
     } catch (Exception e) {
-      //Expected
+      // Expected
     }
     int count = admin.getRegions(tableName).get().size();
     assertTrue(originalCount == count);
@@ -111,12 +111,12 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
   // It was ignored in TestSplitOrMergeStatus, too
   public void testMergeSwitch() throws Exception {
     createTableWithDefaultConf(tableName);
-    byte[][] families = {FAMILY};
+    byte[][] families = { FAMILY };
     TestAsyncRegionAdminApi.loadData(tableName, families, 1000);
 
     AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
     List<HRegionLocation> regionLocations =
-        ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
+      ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName, true).get();
     int originalCount = regionLocations.size();
 
     initSplitMergeSwitch();
@@ -126,7 +126,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
       Threads.sleep(100);
     }
     assertTrue("originalCount=" + originalCount + ", postSplitCount=" + postSplitCount,
-        originalCount != postSplitCount);
+      originalCount != postSplitCount);
 
     // Merge switch is off so merge should NOT succeed.
     assertTrue(admin.mergeSwitch(false).get());
@@ -156,12 +156,12 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
 
   @Test
   public void testMergeRegions() throws Exception {
-    byte[][] splitRows = new byte[][]{Bytes.toBytes("3"), Bytes.toBytes("6")};
+    byte[][] splitRows = new byte[][] { Bytes.toBytes("3"), Bytes.toBytes("6") };
     createTableWithDefaultConf(tableName, splitRows);
 
     AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
     List<HRegionLocation> regionLocations = ClientMetaTableAccessor
-      .getTableHRegionLocations(metaTable, tableName).get();
+      .getTableHRegionLocations(metaTable, tableName, true).get();
     RegionInfo regionA;
     RegionInfo regionB;
     RegionInfo regionC;
@@ -175,7 +175,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
     admin.mergeRegions(regionA.getRegionName(), regionB.getRegionName(), false).get();
 
     regionLocations = ClientMetaTableAccessor
-      .getTableHRegionLocations(metaTable, tableName).get();
+      .getTableHRegionLocations(metaTable, tableName, true).get();
 
     assertEquals(2, regionLocations.size());
     for (HRegionLocation rl : regionLocations) {
@@ -195,11 +195,10 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
       Thread.sleep(200);
     }
     // merge with encoded name
-    admin.mergeRegions(regionC.getRegionName(), mergedChildRegion.getRegionName(),
-      false).get();
+    admin.mergeRegions(regionC.getRegionName(), mergedChildRegion.getRegionName(), false).get();
 
-    regionLocations = ClientMetaTableAccessor
-      .getTableHRegionLocations(metaTable, tableName).get();
+    regionLocations =
+      ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName, true).get();
     assertEquals(1, regionLocations.size());
   }
 
@@ -233,18 +232,18 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
     splitTest(TableName.valueOf("testSplitTable"), 3000, false, null);
     splitTest(TableName.valueOf("testSplitTableWithSplitPoint"), 3000, false, Bytes.toBytes("3"));
     splitTest(TableName.valueOf("testSplitTableRegion"), 3000, true, null);
-    splitTest(TableName.valueOf("testSplitTableRegionWithSplitPoint2"), 3000, true, Bytes.toBytes("3"));
+    splitTest(TableName.valueOf("testSplitTableRegionWithSplitPoint2"), 3000, true,
+      Bytes.toBytes("3"));
   }
 
-  private void
-  splitTest(TableName tableName, int rowCount, boolean isSplitRegion, byte[] splitPoint)
-      throws Exception {
+  private void splitTest(TableName tableName, int rowCount, boolean isSplitRegion,
+    byte[] splitPoint) throws Exception {
     // create table
     createTableWithDefaultConf(tableName);
 
     AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
     List<HRegionLocation> regionLocations = ClientMetaTableAccessor
-      .getTableHRegionLocations(metaTable, tableName).get();
+      .getTableHRegionLocations(metaTable, tableName, true).get();
     assertEquals(1, regionLocations.size());
 
     AsyncTable<?> table = ASYNC_CONN.getTable(tableName);
@@ -273,8 +272,8 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
     int count = 0;
     for (int i = 0; i < 45; i++) {
       try {
-        regionLocations = ClientMetaTableAccessor
-          .getTableHRegionLocations(metaTable, tableName).get();
+        regionLocations =
+          ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName, true).get();
         count = regionLocations.size();
         if (count >= 2) {
           break;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorConcurrenyLimit.java
similarity index 90%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorConcurrenyLimit.java
index 690a384..c2d001a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorConcurrenyLimit.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hbase.client;
 
 import static java.util.stream.Collectors.toList;
-import static org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE;
+import static org.apache.hadoop.hbase.client.AsyncRegionLocator.MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
 import static org.apache.hadoop.hbase.coprocessor.CoprocessorHost.REGION_COPROCESSOR_CONF_KEY;
@@ -57,11 +57,11 @@ import org.junit.experimental.categories.Category;
 import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 
 @Category({ MediumTests.class, ClientTests.class })
-public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
+public class TestAsyncRegionLocatorConcurrenyLimit {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestAsyncNonMetaRegionLocatorConcurrenyLimit.class);
+    HBaseClassTestRule.forClass(TestAsyncRegionLocatorConcurrenyLimit.class);
 
   private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
 
@@ -71,7 +71,7 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
 
   private static AsyncConnectionImpl CONN;
 
-  private static AsyncNonMetaRegionLocator LOCATOR;
+  private static AsyncRegionLocator LOCATOR;
 
   private static byte[][] SPLIT_KEYS;
 
@@ -90,7 +90,7 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
 
     @Override
     public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
-        InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
+      InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
       if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
         int concurrency = CONCURRENCY.incrementAndGet();
         for (;;) {
@@ -109,7 +109,7 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
 
     @Override
     public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
-        InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
+      InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
       if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
         CONCURRENCY.decrementAndGet();
       }
@@ -125,10 +125,10 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
     TEST_UTIL.startMiniCluster(3);
     TEST_UTIL.getAdmin().balancerSwitch(false, true);
     ConnectionRegistry registry =
-        ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
+      ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
       registry.getClusterId().get(), null, User.getCurrent());
-    LOCATOR = new AsyncNonMetaRegionLocator(CONN);
+    LOCATOR = new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER);
     SPLIT_KEYS = IntStream.range(1, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
       .toArray(byte[][]::new);
     TEST_UTIL.createTable(TABLE_NAME, FAMILY, SPLIT_KEYS);
@@ -142,7 +142,7 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
   }
 
   private void assertLocs(List<CompletableFuture<RegionLocations>> futures)
-      throws InterruptedException, ExecutionException {
+    throws InterruptedException, ExecutionException {
     assertEquals(256, futures.size());
     for (int i = 0; i < futures.size(); i++) {
       HRegionLocation loc = futures.get(i).get().getDefaultRegionLocation();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 572a1d5..42d1919 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -45,8 +45,8 @@ import org.junit.runners.Parameterized;
 
 /**
  * Class to test asynchronous table admin operations.
- * @see TestAsyncTableAdminApi2 This test and it used to be joined it was taking longer than our
- *     ten minute timeout so they were split.
+ * @see TestAsyncTableAdminApi2 This test and it used to be joined it was taking longer than our ten
+ *      minute timeout so they were split.
  * @see TestAsyncTableAdminApi3 Another split out from this class so each runs under ten minutes.
  */
 @RunWith(Parameterized.class)
@@ -55,7 +55,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAsyncTableAdminApi.class);
+    HBaseClassTestRule.forClass(TestAsyncTableAdminApi.class);
 
   @Test
   public void testCreateTable() throws Exception {
@@ -65,13 +65,13 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     tables = admin.listTableDescriptors().get();
     assertEquals(numTables + 1, tables.size());
     assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster()
-        .getTableStateManager().isTableState(tableName, TableState.State.ENABLED));
+      .getTableStateManager().isTableState(tableName, TableState.State.ENABLED));
     assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName));
   }
 
   static TableState.State getStateFromMeta(TableName table) throws Exception {
     Optional<TableState> state = ClientMetaTableAccessor
-        .getTableState(ASYNC_CONN.getTable(TableName.META_TABLE_NAME), table).get();
+      .getTableState(ASYNC_CONN.getTable(TableName.META_TABLE_NAME), table).get();
     assertTrue(state.isPresent());
     return state.get().getState();
   }
@@ -82,19 +82,21 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
 
     createTableWithDefaultConf(tableName);
     List<HRegionLocation> regionLocations = ClientMetaTableAccessor
-      .getTableHRegionLocations(metaTable, tableName).get();
+      .getTableHRegionLocations(metaTable, tableName, true).get();
     assertEquals("Table should have only 1 region", 1, regionLocations.size());
 
     final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "_2");
     createTableWithDefaultConf(tableName2, new byte[][] { new byte[] { 42 } });
-    regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2).get();
+    regionLocations =
+      ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2, true).get();
     assertEquals("Table should have only 2 region", 2, regionLocations.size());
 
     final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "_3");
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName3);
     builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
     admin.createTable(builder.build(), Bytes.toBytes("a"), Bytes.toBytes("z"), 3).join();
-    regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3).get();
+    regionLocations =
+      ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3, true).get();
     assertEquals("Table should have only 3 region", 3, regionLocations.size());
 
     final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "_4");
@@ -111,7 +113,8 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     builder = TableDescriptorBuilder.newBuilder(tableName5);
     builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
     admin.createTable(builder.build(), new byte[] { 1 }, new byte[] { 127 }, 16).join();
-    regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName5).get();
+    regionLocations =
+      ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName5, true).get();
     assertEquals("Table should have 16 region", 16, regionLocations.size());
   }
 
@@ -128,7 +131,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
 
     AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
     List<HRegionLocation> regions = ClientMetaTableAccessor
-      .getTableHRegionLocations(metaTable, tableName).get();
+      .getTableHRegionLocations(metaTable, tableName, true).get();
     Iterator<HRegionLocation> hris = regions.iterator();
 
     assertEquals(
@@ -183,7 +186,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
     admin.createTable(builder.build(), startKey, endKey, expectedRegions).join();
 
-    regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2).get();
+    regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2, true).get();
     assertEquals(
       "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
       expectedRegions, regions.size());
@@ -231,8 +234,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
     admin.createTable(builder.build(), startKey, endKey, expectedRegions).join();
 
-    regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3)
-      .get();
+    regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3, true).get();
     assertEquals(
       "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
       expectedRegions, regions.size());
@@ -296,7 +298,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
   }
 
   private void testTruncateTable(final TableName tableName, boolean preserveSplits)
-      throws Exception {
+    throws Exception {
     byte[][] splitKeys = new byte[2][];
     splitKeys[0] = Bytes.toBytes(4);
     splitKeys[1] = Bytes.toBytes(8);
@@ -337,8 +339,8 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     testCloneTableSchema(tableName, newTableName, true);
   }
 
-  private void testCloneTableSchema(final TableName tableName,
-      final TableName newTableName, boolean preserveSplits) throws Exception {
+  private void testCloneTableSchema(final TableName tableName, final TableName newTableName,
+    boolean preserveSplits) throws Exception {
     byte[][] splitKeys = new byte[2][];
     splitKeys[0] = Bytes.toBytes(4);
     splitKeys[1] = Bytes.toBytes(8);
@@ -349,20 +351,16 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     boolean BLOCK_CACHE = false;
 
     // Create the table
-    TableDescriptor tableDesc = TableDescriptorBuilder
-        .newBuilder(tableName)
-        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
-        .setColumnFamily(ColumnFamilyDescriptorBuilder
-            .newBuilder(FAMILY_1)
-            .setBlocksize(BLOCK_SIZE)
-            .setBlockCacheEnabled(BLOCK_CACHE)
-            .setTimeToLive(TTL)
-            .build()).build();
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_1).setBlocksize(BLOCK_SIZE)
+        .setBlockCacheEnabled(BLOCK_CACHE).setTimeToLive(TTL).build())
+      .build();
     admin.createTable(tableDesc, splitKeys).join();
 
     assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
     assertTrue("Table should be created with splitKyes + 1 rows in META",
-        admin.isTableAvailable(tableName).get());
+      admin.isTableAvailable(tableName).get());
 
     // Clone & Verify
     admin.cloneTableSchema(tableName, newTableName, preserveSplits).join();
@@ -377,7 +375,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     if (preserveSplits) {
       assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size());
       assertTrue("New table should be created with splitKyes + 1 rows in META",
-          admin.isTableAvailable(newTableName).get());
+        admin.isTableAvailable(newTableName).get());
     } else {
       assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size());
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
index 4a71baf..793d159 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
@@ -46,15 +46,15 @@ import org.junit.runners.Parameterized;
 
 /**
  * Class to test asynchronous table admin operations.
- * @see TestAsyncTableAdminApi2 This test and it used to be joined it was taking longer than our
- * ten minute timeout so they were split.
+ * @see TestAsyncTableAdminApi2 This test and it used to be joined it was taking longer than our ten
+ *      minute timeout so they were split.
  */
 @RunWith(Parameterized.class)
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAsyncTableAdminApi3.class);
+    HBaseClassTestRule.forClass(TestAsyncTableAdminApi3.class);
 
   @Test
   public void testTableExist() throws Exception {
@@ -122,7 +122,7 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
     assertEquals(tables.length + 1, size);
     for (int i = 0, j = 0; i < tables.length && j < size; i++, j++) {
       assertTrue("tableName should be equal in order",
-          tableDescs.get(j).getTableName().equals(tables[i]));
+        tableDescs.get(j).getTableName().equals(tables[i]));
     }
     assertTrue(tableDescs.get(size - 1).getTableName().equals(TableName.META_TABLE_NAME));
 
@@ -166,7 +166,7 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
 
     this.admin.disableTable(tableName).join();
     assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster().getMaster()
-        .getTableStateManager().isTableState(tableName, TableState.State.DISABLED));
+      .getTableStateManager().isTableState(tableName, TableState.State.DISABLED));
     assertEquals(TableState.State.DISABLED, TestAsyncTableAdminApi.getStateFromMeta(tableName));
 
     // Test that table is disabled
@@ -188,7 +188,7 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
     assertTrue(ok);
     this.admin.enableTable(tableName).join();
     assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster()
-        .getTableStateManager().isTableState(tableName, TableState.State.ENABLED));
+      .getTableStateManager().isTableState(tableName, TableState.State.ENABLED));
     assertEquals(TableState.State.ENABLED, TestAsyncTableAdminApi.getStateFromMeta(tableName));
 
     // Test that table is enabled
@@ -230,7 +230,7 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
     table2.get(get).get();
 
     admin.listTableNames(Pattern.compile(tableName.getNameAsString() + ".*"), false).get()
-        .forEach(t -> admin.disableTable(t).join());
+      .forEach(t -> admin.disableTable(t).join());
 
     // Test that tables are disabled
     get = new Get(row);
@@ -254,7 +254,7 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
     assertEquals(TableState.State.DISABLED, TestAsyncTableAdminApi.getStateFromMeta(tableName2));
 
     admin.listTableNames(Pattern.compile(tableName.getNameAsString() + ".*"), false).get()
-        .forEach(t -> admin.enableTable(t).join());
+      .forEach(t -> admin.enableTable(t).join());
 
     // Test that tables are enabled
     try {
@@ -281,8 +281,8 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
     createTableWithDefaultConf(tableName, splitKeys);
 
     AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
-    List<HRegionLocation> regions = ClientMetaTableAccessor
-      .getTableHRegionLocations(metaTable, tableName).get();
+    List<HRegionLocation> regions =
+      ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName, true).get();
     assertEquals(
       "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
       expectedRegions, regions.size());
@@ -292,8 +292,8 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
     // Enable table, use retain assignment to assign regions.
     admin.enableTable(tableName).join();
 
-    List<HRegionLocation> regions2 = ClientMetaTableAccessor
-      .getTableHRegionLocations(metaTable, tableName).get();
+    List<HRegionLocation> regions2 =
+      ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName, true).get();
     // Check the assignment.
     assertEquals(regions.size(), regions2.size());
     assertTrue(regions2.containsAll(regions));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java
index 245d755..ff186f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
 import static org.junit.Assert.assertNotNull;
 
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.TableName;
@@ -47,18 +48,19 @@ public class TestAsyncTableLocatePrefetch {
 
   private static byte[] FAMILY = Bytes.toBytes("cf");
 
-  private static AsyncConnection CONN;
+  private static AsyncConnectionImpl CONN;
 
-  private static AsyncNonMetaRegionLocator LOCATOR;
+  private static AsyncRegionLocator LOCATOR;
 
   @BeforeClass
   public static void setUp() throws Exception {
-    TEST_UTIL.getConfiguration().setInt(AsyncNonMetaRegionLocator.LOCATE_PREFETCH_LIMIT, 100);
+    TEST_UTIL.getConfiguration().setInt(AsyncRegionLocator.LOCATE_PREFETCH_LIMIT, 100);
     TEST_UTIL.startMiniCluster(3);
     TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
-    CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
-    LOCATOR = new AsyncNonMetaRegionLocator((AsyncConnectionImpl) CONN);
+    CONN = (AsyncConnectionImpl) ConnectionFactory
+      .createAsyncConnection(TEST_UTIL.getConfiguration()).get();
+    LOCATOR = new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER);
   }
 
   @AfterClass
@@ -70,7 +72,7 @@ public class TestAsyncTableLocatePrefetch {
   @Test
   public void test() throws InterruptedException, ExecutionException {
     assertNotNull(LOCATOR.getRegionLocations(TABLE_NAME, Bytes.toBytes("zzz"),
-      RegionReplicaUtil.DEFAULT_REPLICA_ID, RegionLocateType.CURRENT, false).get());
+      RegionLocateType.CURRENT, false, TimeUnit.MINUTES.toNanos(1)).get());
     // we finish the request before we adding the remaining results to cache so sleep a bit here
     Thread.sleep(1000);
     // confirm that the locations of all the regions have been cached.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java
index 6c538f5..77ee820 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java
@@ -84,8 +84,7 @@ public class TestAsyncTableRSCrashPublish {
   public void test() throws IOException, ExecutionException, InterruptedException {
     Configuration conf = UTIL.getHBaseCluster().getMaster().getConfiguration();
     try (AsyncConnection connection = ConnectionFactory.createAsyncConnection(conf).get()) {
-      AsyncNonMetaRegionLocator locator =
-        ((AsyncConnectionImpl) connection).getLocator().getNonMetaRegionLocator();
+      AsyncRegionLocator locator = ((AsyncConnectionImpl) connection).getLocator();
       connection.getTable(TABLE_NAME).get(new Get(Bytes.toBytes(0))).join();
       ServerName serverName =
         locator.getRegionLocationInCache(TABLE_NAME, HConstants.EMPTY_START_ROW)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java
index 61bb163..6a32ff8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java
@@ -92,9 +92,7 @@ public class TestAsyncTableUseMetaReplicas {
       FailPrimaryMetaScanCp.class.getName());
     UTIL.startMiniCluster(3);
     HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
-    try (ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf)) {
-      RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry);
-    }
+    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL);
     try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) {
       table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE));
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
index bebc843..4143d4a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
@@ -23,10 +23,13 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.security.User;
@@ -96,8 +99,9 @@ public class TestCatalogReplicaLoadBalanceSimpleSelector {
       .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> {
         int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
         try {
-          RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get
-            (CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
+          List<HRegionLocation> metaLocations = CONN.getRegionLocator(TableName.META_TABLE_NAME)
+            .getRegionLocations(HConstants.EMPTY_START_ROW, true)
+            .get(CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
           numOfReplicas = metaLocations.size();
         } catch (Exception e) {
           LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
@@ -119,8 +123,9 @@ public class TestCatalogReplicaLoadBalanceSimpleSelector {
         replicaSelectorClass, META_TABLE_NAME, CONN, () -> {
         int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
         try {
-          RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get(
-            CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
+          List<HRegionLocation> metaLocations = CONN.getRegionLocator(TableName.META_TABLE_NAME)
+            .getRegionLocations(HConstants.EMPTY_START_ROW, true)
+            .get(CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
           numOfReplicas = metaLocations.size();
         } catch (Exception e) {
           LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
index e4bdff9..1044d2d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
@@ -119,7 +119,7 @@ public class TestMasterRegistry {
       try (MasterRegistry registry = new MasterRegistry(conf)) {
         // Add wait on all replicas being assigned before proceeding w/ test. Failed on occasion
         // because not all replicas had made it up before test started.
-        RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry);
+        RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
         assertEquals(registry.getClusterId().get(), activeMaster.getClusterId());
         assertEquals(registry.getActiveMaster().get(), activeMaster.getServerName());
         List<HRegionLocation> metaLocations =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
index 2197a21..d79f8ca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
@@ -49,11 +49,11 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 
-@Category({SmallTests.class, MasterTests.class })
+@Category({ SmallTests.class, MasterTests.class })
 public class TestMetaRegionLocationCache {
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestMetaRegionLocationCache.class);
+    HBaseClassTestRule.forClass(TestMetaRegionLocationCache.class);
 
   private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
   private static ConnectionRegistry REGISTRY;
@@ -63,7 +63,7 @@ public class TestMetaRegionLocationCache {
     TEST_UTIL.startMiniCluster(3);
     HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
     REGISTRY = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
-    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY);
+    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
     TEST_UTIL.getAdmin().balancerSwitch(false, true);
   }
 
@@ -75,7 +75,7 @@ public class TestMetaRegionLocationCache {
 
   private List<HRegionLocation> getCurrentMetaLocations(ZKWatcher zk) throws Exception {
     List<HRegionLocation> result = new ArrayList<>();
-    for (String znode: zk.getMetaReplicaNodes()) {
+    for (String znode : zk.getMetaReplicaNodes()) {
       String path = ZNodePaths.joinZNode(zk.getZNodePaths().baseZNode, znode);
       int replicaId = zk.getZNodePaths().getMetaReplicaIdFromPath(path);
       RegionState state = MetaTableLocator.getMetaRegionState(zk, replicaId);
@@ -95,7 +95,7 @@ public class TestMetaRegionLocationCache {
       }
     }
     List<HRegionLocation> metaHRLs =
-        master.getMetaRegionLocationCache().getMetaRegionLocations().get();
+      master.getMetaRegionLocationCache().getMetaRegionLocations().get();
     assertFalse(metaHRLs.isEmpty());
     ZKWatcher zk = master.getZooKeeper();
     List<String> metaZnodes = zk.getMetaReplicaNodes();
@@ -115,11 +115,13 @@ public class TestMetaRegionLocationCache {
     assertEquals(actualHRLs, metaHRLs);
   }
 
-  @Test public void testInitialMetaLocations() throws Exception {
+  @Test
+  public void testInitialMetaLocations() throws Exception {
     verifyCachedMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster());
   }
 
-  @Test public void testStandByMetaLocations() throws Exception {
+  @Test
+  public void testStandByMetaLocations() throws Exception {
     HMaster standBy = TEST_UTIL.getMiniHBaseCluster().startMaster().getMaster();
     standBy.isInitialized();
     verifyCachedMetaLocations(standBy);
@@ -128,16 +130,17 @@ public class TestMetaRegionLocationCache {
   /*
    * Shuffles the meta region replicas around the cluster and makes sure the cache is not stale.
    */
-  @Test public void testMetaLocationsChange() throws Exception {
+  @Test
+  public void testMetaLocationsChange() throws Exception {
     List<HRegionLocation> currentMetaLocs =
-        getCurrentMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper());
+      getCurrentMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper());
     // Move these replicas to random servers.
-    for (HRegionLocation location: currentMetaLocs) {
+    for (HRegionLocation location : currentMetaLocs) {
       RegionReplicaTestHelper.moveRegion(TEST_UTIL, location);
     }
-    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY);
-    for (JVMClusterUtil.MasterThread masterThread:
-        TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) {
+    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
+    for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster()
+      .getMasterThreads()) {
       verifyCachedMetaLocations(masterThread.getMaster());
     }
   }
@@ -146,7 +149,8 @@ public class TestMetaRegionLocationCache {
    * Tests MetaRegionLocationCache's init procedure to make sure that it correctly watches the base
    * znode for notifications.
    */
-  @Test public void testMetaRegionLocationCache() throws Exception {
+  @Test
+  public void testMetaRegionLocationCache() throws Exception {
     final String parentZnodeName = "/randomznodename";
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
     conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parentZnodeName);
@@ -156,7 +160,8 @@ public class TestMetaRegionLocationCache {
       // some ZK activity in the background.
       MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
       ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
-        @Override public void doAnAction() throws Exception {
+        @Override
+        public void doAnAction() throws Exception {
           final String testZnode = parentZnodeName + "/child";
           ZKUtil.createNodeIfNotExistsAndWatch(zkWatcher, testZnode, testZnode.getBytes());
           ZKUtil.deleteNode(zkWatcher, testZnode);
@@ -176,8 +181,8 @@ public class TestMetaRegionLocationCache {
         // Wait until the meta cache is populated.
         int iters = 0;
         while (iters++ < 10) {
-          if (metaCache.getMetaRegionLocations().isPresent()
-            && metaCache.getMetaRegionLocations().get().size() == 3) {
+          if (metaCache.getMetaRegionLocations().isPresent() &&
+            metaCache.getMetaRegionLocations().get().size() == 3) {
             break;
           }
           Thread.sleep(1000);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java
index e7c872d..ba5c9d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java
@@ -118,6 +118,10 @@ public class TestMetaWithReplicasShutdownHandling extends MetaWithReplicasTestBa
           Thread.sleep(
             conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 3);
         }
+        // cache the location for all the meta regions.
+        try (RegionLocator locator = c.getRegionLocator(TableName.META_TABLE_NAME)) {
+          locator.getAllRegionLocations();
+        }
         // Ensure all metas are not on same hbase:meta replica=0 server!
 
         master = util.getHBaseClusterInterface().getClusterMetrics().getMasterName();
@@ -131,7 +135,6 @@ public class TestMetaWithReplicasShutdownHandling extends MetaWithReplicasTestBa
           util.getHBaseClusterInterface().killRegionServer(primary);
           util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000);
         }
-        c.clearRegionLocationCache();
       }
       LOG.info("Running GETs");
       try (Table htable = c.getTable(TABLE)) {
@@ -150,15 +153,15 @@ public class TestMetaWithReplicasShutdownHandling extends MetaWithReplicasTestBa
         util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0);
         util.getHBaseClusterInterface().waitForActiveAndReadyMaster();
         LOG.info("Master active!");
-        c.clearRegionLocationCache();
       }
     }
     conf.setBoolean(HConstants.USE_META_REPLICAS, false);
     LOG.info("Running GETs no replicas");
-    try (Connection c = ConnectionFactory.createConnection(conf);
-      Table htable = c.getTable(TABLE)) {
-      Result r = htable.get(new Get(row));
-      assertArrayEquals(row, r.getRow());
+    try (Connection c = ConnectionFactory.createConnection(conf)) {
+      try (Table htable = c.getTable(TABLE)) {
+        Result r = htable.get(new Get(row));
+        assertArrayEquals(r.getRow(), row);
+      }
     }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 4828cea..6bf61ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -210,7 +210,7 @@ public class TestReplicasClient {
 
     // No master
     LOG.info("Master is going to be stopped");
-    TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
+    TestRegionServerNoMaster.stopMasterAndCacheMetaLocation(HTU);
     Configuration c = new Configuration(HTU.getConfiguration());
     c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
     LOG.info("Master has stopped");
@@ -224,7 +224,9 @@ public class TestReplicasClient {
 
   @Before
   public void before() throws IOException {
-    HTU.getConnection().clearRegionLocationCache();
+    try (RegionLocator locator = HTU.getConnection().getRegionLocator(TABLE_NAME)) {
+      locator.clearRegionLocationCache();
+    }
     try {
       openRegion(hriPrimary);
     } catch (Exception ignored) {
@@ -246,7 +248,6 @@ public class TestReplicasClient {
       closeRegion(hriPrimary);
     } catch (Exception ignored) {
     }
-    HTU.getConnection().clearRegionLocationCache();
   }
 
   private HRegionServer getRS() {
@@ -325,16 +326,15 @@ public class TestReplicasClient {
     byte[] b1 = Bytes.toBytes("testLocations");
     openRegion(hriSecondary);
 
-    try (Connection conn = ConnectionFactory.createConnection(HTU.getConfiguration());
-        RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
-      conn.clearRegionLocationCache();
+    try (RegionLocator locator = HTU.getConnection().getRegionLocator(TABLE_NAME)) {
+      locator.clearRegionLocationCache();
       List<HRegionLocation> rl = locator.getRegionLocations(b1, true);
       Assert.assertEquals(2, rl.size());
 
       rl = locator.getRegionLocations(b1, false);
       Assert.assertEquals(2, rl.size());
 
-      conn.clearRegionLocationCache();
+      locator.clearRegionLocationCache();
       rl = locator.getRegionLocations(b1, false);
       Assert.assertEquals(2, rl.size());
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
index 4894c52..c00dd39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
@@ -83,8 +83,7 @@ public class TestZKConnectionRegistry {
       clusterId);
     assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(),
       REGISTRY.getActiveMaster().get());
-    RegionReplicaTestHelper
-      .waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY);
+    RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL);
     RegionLocations locs = REGISTRY.getMetaRegionLocations().get();
     assertEquals(3, locs.getRegionLocations().length);
     IntStream.range(0, 3).forEach(i -> {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 933addf..21dd128 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
@@ -520,4 +521,9 @@ public class MockNoopMasterServices implements MasterServices {
   public MetaLocationSyncer getMetaLocationSyncer() {
     return null;
   }
+
+  public List<RegionLocations> getAllMetaRegionLocations(boolean excludeOfflinedSplitParents)
+    throws IOException {
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 9e0333b..d7b4eda 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.ClusterMetrics;
@@ -28,12 +29,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
-import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
@@ -166,54 +164,5 @@ public class TestMasterFailover {
       TEST_UTIL.shutdownMiniCluster();
     }
   }
-
-  /**
-   * Test meta in transition when master failover.
-   * This test used to manipulate region state up in zk. That is not allowed any more in hbase2
-   * so I removed that messing. That makes this test anemic.
-   */
-  @Test
-  public void testMetaInTransitionWhenMasterFailover() throws Exception {
-    // Start the cluster
-    HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
-    TEST_UTIL.startMiniCluster();
-    try {
-      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-      LOG.info("Cluster started");
-
-      HMaster activeMaster = cluster.getMaster();
-      ServerName metaServerName = cluster.getServerHoldingMeta();
-      HRegionServer hrs = cluster.getRegionServer(metaServerName);
-
-      // Now kill master, meta should remain on rs, where we placed it before.
-      LOG.info("Aborting master");
-      activeMaster.abort("test-kill");
-      cluster.waitForMasterToStop(activeMaster.getServerName(), 30000);
-      LOG.info("Master has aborted");
-
-      // meta should remain where it was
-      RegionState metaState = MetaTableLocator.getMetaRegionState(hrs.getZooKeeper());
-      assertEquals("hbase:meta should be online on RS",
-          metaState.getServerName(), metaServerName);
-      assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());
-
-      // Start up a new master
-      LOG.info("Starting up a new master");
-      activeMaster = cluster.startMaster().getMaster();
-      LOG.info("Waiting for master to be ready");
-      cluster.waitForActiveAndReadyMaster();
-      LOG.info("Master is ready");
-
-      // ensure meta is still deployed on RS
-      metaState = MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper());
-      assertEquals("hbase:meta should be online on RS",
-          metaState.getServerName(), metaServerName);
-      assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());
-
-      // Done, shutdown the cluster
-    } finally {
-      TEST_UTIL.shutdownMiniCluster();
-    }
-  }
 }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
index 6ad4f08..b216eb4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.ServerName;
@@ -90,7 +91,20 @@ public class TestMetaAssignmentWithStopMaster {
         }
       }
 
-      ServerName newMetaServer = locator.getAllRegionLocations().get(0).getServerName();
+      ServerName newMetaServer;
+      startTime = System.currentTimeMillis();
+      for (;;) {
+        try {
+          newMetaServer = locator.getAllRegionLocations().get(0).getServerName();
+          break;
+        } catch (IOException e) {
+          LOG.warn("failed to get all locations, retry...", e);
+        }
+        Thread.sleep(3000);
+        if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
+          fail("Wait too long for getting the new meta location");
+        }
+      }
       assertTrue("The new meta server " + newMetaServer + " should be same with" +
         " the old meta server " + oldMetaServer, newMetaServer.equals(oldMetaServer));
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
index cf35ae2..29bafe3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.master;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -35,7 +34,6 @@ import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.zookeeper.KeeperException;
@@ -100,8 +98,6 @@ public class TestMetaShutdownHandler {
       metaServerName =
         regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
     }
-    RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper());
-    assertEquals("Wrong state for meta!", RegionState.State.OPEN, metaState.getState());
     assertNotEquals("Meta is on master!", metaServerName, master.getServerName());
     HRegionServer metaRegionServer = cluster.getRegionServer(metaServerName);
 
@@ -129,11 +125,9 @@ public class TestMetaShutdownHandler {
     assertTrue("Meta should be assigned",
       regionStates.isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO));
     // Now, make sure meta is registered in zk
-    metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper());
-    assertEquals("Meta should not be in transition", RegionState.State.OPEN, metaState.getState());
-    assertEquals("Meta should be assigned", metaState.getServerName(),
-      regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO));
-    assertNotEquals("Meta should be assigned on a different server", metaState.getServerName(),
+    ServerName newMetaServerName =
+      regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    assertNotEquals("Meta should be assigned on a different server", newMetaServerName,
       metaServerName);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java
index f308a71..b1f5491 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java
@@ -20,10 +20,10 @@ package org.apache.hadoop.hbase.master.assignment;
 
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
@@ -50,15 +50,12 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 @Category({ RegionServerTests.class, MediumTests.class })
 public class TestRegionReplicaSplit {
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestRegionReplicaSplit.class);
-  private static final Logger LOG = LoggerFactory.getLogger(TestRegionReplicaSplit.class);
 
   private static final int NB_SERVERS = 4;
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java
index a4a3f86..0ea4f75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java
@@ -165,7 +165,7 @@ public class TestCompactionLifeCycleTracker {
                         .setValue(Bytes.toBytes(i))
                         .build()));
       }
-      UTIL.getAdmin().flush(NAME);
+      UTIL.flush(NAME);
       for (int i = 100; i < 200; i++) {
         byte[] row = Bytes.toBytes(i);
         table.put(new Put(row)
@@ -178,7 +178,7 @@ public class TestCompactionLifeCycleTracker {
                         .setValue(Bytes.toBytes(i))
                         .build()));
       }
-      UTIL.getAdmin().flush(NAME);
+      UTIL.flush(NAME);
     }
     region = UTIL.getHBaseCluster().getRegions(NAME).get(0);
     assertEquals(2, region.getStore(CF1).getStorefilesCount());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
index 61f8fe8..f7af83b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -111,7 +111,7 @@ public class TestRegionReplicas {
     hriSecondary = RegionReplicaUtil.getRegionInfoForReplica(hriPrimary, 1);
 
     // No master
-    TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
+    TestRegionServerNoMaster.stopMasterAndCacheMetaLocation(HTU);
   }
 
   @AfterClass
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index 4260b1d..71e92fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -35,10 +34,9 @@ import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -90,15 +88,24 @@ public class TestRegionServerNoMaster {
     }
     regionName = hri.getRegionName();
 
-    stopMasterAndAssignMeta(HTU);
+    stopMasterAndCacheMetaLocation(HTU);
   }
 
-  public static void stopMasterAndAssignMeta(HBaseTestingUtil HTU)
-      throws IOException, InterruptedException {
+  public static void stopMasterAndCacheMetaLocation(HBaseTestingUtil HTU)
+    throws IOException, InterruptedException {
+    // cache meta location, so we will not go to master to lookup meta region location
+    for (JVMClusterUtil.RegionServerThread t : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
+      try (RegionLocator locator =
+        t.getRegionServer().getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+        locator.getAllRegionLocations();
+      }
+    }
+    try (RegionLocator locator = HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+      locator.getAllRegionLocations();
+    }
     // Stop master
     HMaster master = HTU.getHBaseCluster().getMaster();
     Thread masterThread = HTU.getHBaseCluster().getMasterThread();
-    ServerName masterAddr = master.getServerName();
     master.stopMaster();
 
     LOG.info("Waiting until master thread exits");
@@ -107,27 +114,6 @@ public class TestRegionServerNoMaster {
     }
 
     HRegionServer.TEST_SKIP_REPORTING_TRANSITION = true;
-    // Master is down, so is the meta. We need to assign it somewhere
-    // so that regions can be assigned during the mocking phase.
-    HRegionServer hrs = HTU.getHBaseCluster()
-      .getLiveRegionServerThreads().get(0).getRegionServer();
-    ZKWatcher zkw = hrs.getZooKeeper();
-    ServerName sn = MetaTableLocator.getMetaRegionLocation(zkw);
-    if (sn != null && !masterAddr.equals(sn)) {
-      return;
-    }
-
-    ProtobufUtil.openRegion(null, hrs.getRSRpcServices(),
-      hrs.getServerName(), RegionInfoBuilder.FIRST_META_REGIONINFO);
-    while (true) {
-      sn = MetaTableLocator.getMetaRegionLocation(zkw);
-      if (sn != null && sn.equals(hrs.getServerName())
-          && hrs.getOnlineRegions().containsKey(
-            RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName())) {
-        break;
-      }
-      Thread.sleep(100);
-    }
   }
 
   /** Flush the given region in the mini cluster. Since no master, we cannot use HBaseAdmin.flush() */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
index 74de90b..7f56095 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -55,7 +54,6 @@ import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -133,8 +131,12 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
     // mock a secondary region info to open
     hriSecondary = RegionReplicaUtil.getRegionInfoForReplica(hriPrimary, 1);
 
+    // cache the location for meta regions
+    try (RegionLocator locator = HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+      locator.getAllRegionLocations();
+    }
     // No master
-    TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
+    TestRegionServerNoMaster.stopMasterAndCacheMetaLocation(HTU);
     rs0 = HTU.getMiniHBaseCluster().getRegionServer(0);
     rs1 = HTU.getMiniHBaseCluster().getRegionServer(1);
   }
@@ -186,11 +188,9 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
     HTU.loadNumericRows(table, f, 0, 1000);
 
     Assert.assertEquals(1000, entries.size());
-    try (AsyncClusterConnection conn = ClusterConnectionFactory
-      .createAsyncClusterConnection(HTU.getConfiguration(), null, User.getCurrent())) {
-      // replay the edits to the secondary using replay callable
-      replicateUsingCallable(conn, entries);
-    }
+    AsyncClusterConnection conn = HTU.getAsyncConnection();
+    // replay the edits to the secondary using replay callable
+    replicateUsingCallable(conn, entries);
 
     Region region = rs0.getRegion(hriSecondary.getEncodedName());
     HTU.verifyNumericRows(region, f, 0, 1000);
@@ -216,36 +216,34 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
   public void testReplayCallableWithRegionMove() throws Exception {
     // tests replaying the edits to a secondary region replica using the Callable directly while
     // the region is moved to another location.It tests handling of RME.
-    try (AsyncClusterConnection conn = ClusterConnectionFactory
-      .createAsyncClusterConnection(HTU.getConfiguration(), null, User.getCurrent())) {
-      openRegion(HTU, rs0, hriSecondary);
-      // load some data to primary
-      HTU.loadNumericRows(table, f, 0, 1000);
+    AsyncClusterConnection conn = HTU.getAsyncConnection();
+    openRegion(HTU, rs0, hriSecondary);
+    // load some data to primary
+    HTU.loadNumericRows(table, f, 0, 1000);
 
-      Assert.assertEquals(1000, entries.size());
+    Assert.assertEquals(1000, entries.size());
 
-      // replay the edits to the secondary using replay callable
-      replicateUsingCallable(conn, entries);
+    // replay the edits to the secondary using replay callable
+    replicateUsingCallable(conn, entries);
 
-      Region region = rs0.getRegion(hriSecondary.getEncodedName());
-      HTU.verifyNumericRows(region, f, 0, 1000);
+    Region region = rs0.getRegion(hriSecondary.getEncodedName());
+    HTU.verifyNumericRows(region, f, 0, 1000);
 
-      HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary
+    HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary
 
-      // move the secondary region from RS0 to RS1
-      closeRegion(HTU, rs0, hriSecondary);
-      openRegion(HTU, rs1, hriSecondary);
+    // move the secondary region from RS0 to RS1
+    closeRegion(HTU, rs0, hriSecondary);
+    openRegion(HTU, rs1, hriSecondary);
 
-      // replicate the new data
-      replicateUsingCallable(conn, entries);
+    // replicate the new data
+    replicateUsingCallable(conn, entries);
 
-      region = rs1.getRegion(hriSecondary.getEncodedName());
-      // verify the new data. old data may or may not be there
-      HTU.verifyNumericRows(region, f, 1000, 2000);
+    region = rs1.getRegion(hriSecondary.getEncodedName());
+    // verify the new data. old data may or may not be there
+    HTU.verifyNumericRows(region, f, 1000, 2000);
 
-      HTU.deleteNumericRows(table, f, 0, 2000);
-      closeRegion(HTU, rs1, hriSecondary);
-    }
+    HTU.deleteNumericRows(table, f, 0, 2000);
+    closeRegion(HTU, rs1, hriSecondary);
   }
 
   @Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
index 90fe71e..59ca67d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
@@ -129,7 +129,7 @@ public class TestHBaseFsckEncryption {
       table.close();
     }
     // Flush it
-    TEST_UTIL.getAdmin().flush(tableDescriptor.getTableName());
+    TEST_UTIL.flush(tableDescriptor.getTableName());
 
     // Verify we have encrypted store files on disk
     final List<Path> paths = findStorefilePaths(tableDescriptor.getTableName());
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index 807bf6f..82d4621 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -17,24 +17,16 @@
  */
 package org.apache.hadoop.hbase.zookeeper;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
 
@@ -47,9 +39,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaReg
  * <p/>
  * Meta region location is set by <code>RegionServerServices</code>. This class doesn't use ZK
  * watchers, rather accesses ZK directly.
- * <p/>
- * TODO: rewrite using RPC calls to master to find out about hbase:meta.
+ * @deprecated Since 3.0.0, will be removed in 4.0.0. Now we store the meta location in the local
+ *             store of master, the location on zk is only a mirror of the first meta region to keep
+ *             compatibility.
  */
+@Deprecated
 @InterfaceAudience.Private
 public final class MetaTableLocator {
   private static final Logger LOG = LoggerFactory.getLogger(MetaTableLocator.class);
@@ -58,166 +52,16 @@ public final class MetaTableLocator {
   }
 
   /**
-   * @param zkw ZooKeeper watcher to be used
-   * @return meta table regions and their locations.
-   */
-  public static List<Pair<RegionInfo, ServerName>> getMetaRegionsAndLocations(ZKWatcher zkw) {
-    return getMetaRegionsAndLocations(zkw, RegionInfo.DEFAULT_REPLICA_ID);
-  }
-
-  /**
-   * Gets the meta regions and their locations for the given path and replica ID.
-   *
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param replicaId the ID of the replica
-   * @return meta table regions and their locations.
-   */
-  public static List<Pair<RegionInfo, ServerName>> getMetaRegionsAndLocations(ZKWatcher zkw,
-      int replicaId) {
-    ServerName serverName = getMetaRegionLocation(zkw, replicaId);
-    List<Pair<RegionInfo, ServerName>> list = new ArrayList<>(1);
-    list.add(new Pair<>(RegionReplicaUtil.getRegionInfoForReplica(
-        RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), serverName));
-    return list;
-  }
-
-  /**
-   * Gets the meta regions for the given path with the default replica ID.
-   *
-   * @param zkw ZooKeeper watcher to be used
-   * @return List of meta regions
-   */
-  public static List<RegionInfo> getMetaRegions(ZKWatcher zkw) {
-    return getMetaRegions(zkw, RegionInfo.DEFAULT_REPLICA_ID);
-  }
-
-  /**
-   * Gets the meta regions for the given path and replica ID.
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param replicaId the ID of the replica
-   * @return List of meta regions
-   */
-  public static List<RegionInfo> getMetaRegions(ZKWatcher zkw, int replicaId) {
-    List<Pair<RegionInfo, ServerName>> result;
-    result = getMetaRegionsAndLocations(zkw, replicaId);
-    return getListOfRegionInfos(result);
-  }
-
-  private static List<RegionInfo> getListOfRegionInfos(
-      final List<Pair<RegionInfo, ServerName>> pairs) {
-    if (pairs == null || pairs.isEmpty()) {
-      return Collections.emptyList();
-    }
-
-    List<RegionInfo> result = new ArrayList<>(pairs.size());
-    for (Pair<RegionInfo, ServerName> pair : pairs) {
-      result.add(pair.getFirst());
-    }
-    return result;
-  }
-
-  /**
-   * Gets the meta region location, if available.  Does not block.
-   * @param zkw zookeeper connection to use
-   * @return server name or null if we failed to get the data.
-   */
-  public static ServerName getMetaRegionLocation(final ZKWatcher zkw) {
-    try {
-      RegionState state = getMetaRegionState(zkw);
-      return state.isOpened() ? state.getServerName() : null;
-    } catch (KeeperException ke) {
-      return null;
-    }
-  }
-
-  /**
-   * Gets the meta region location, if available.  Does not block.
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param replicaId the ID of the replica
-   * @return server name
-   */
-  public static ServerName getMetaRegionLocation(final ZKWatcher zkw, int replicaId) {
-    try {
-      RegionState state = getMetaRegionState(zkw, replicaId);
-      return state.isOpened() ? state.getServerName() : null;
-    } catch (KeeperException ke) {
-      return null;
-    }
-  }
-
-  /**
-   * Gets the meta region location, if available, and waits for up to the specified timeout if not
-   * immediately available. Given the zookeeper notification could be delayed, we will try to get
-   * the latest data.
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param timeout maximum time to wait, in millis
-   * @return server name for server hosting meta region formatted as per {@link ServerName}, or null
-   *         if none available
-   * @throws InterruptedException if interrupted while waiting
-   * @throws NotAllMetaRegionsOnlineException if a meta or root region is not online
-   */
-  public static ServerName waitMetaRegionLocation(ZKWatcher zkw, long timeout)
-      throws InterruptedException, NotAllMetaRegionsOnlineException {
-    return waitMetaRegionLocation(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout);
-  }
-
-  /**
-   * Gets the meta region location, if available, and waits for up to the specified timeout if not
-   * immediately available. Given the zookeeper notification could be delayed, we will try to get
-   * the latest data.
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param replicaId the ID of the replica
-   * @param timeout maximum time to wait, in millis
-   * @return server name for server hosting meta region formatted as per {@link ServerName}, or null
-   *         if none available
-   * @throws InterruptedException if waiting for the socket operation fails
-   * @throws NotAllMetaRegionsOnlineException if a meta or root region is not online
-   */
-  public static ServerName waitMetaRegionLocation(ZKWatcher zkw, int replicaId, long timeout)
-      throws InterruptedException, NotAllMetaRegionsOnlineException {
-    try {
-      if (ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode) == -1) {
-        String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. " +
-          "There could be a mismatch with the one configured in the master.";
-        LOG.error(errorMsg);
-        throw new IllegalArgumentException(errorMsg);
-      }
-    } catch (KeeperException e) {
-      throw new IllegalStateException("KeeperException while trying to check baseZNode:", e);
-    }
-    ServerName sn = blockUntilAvailable(zkw, replicaId, timeout);
-
-    if (sn == null) {
-      throw new NotAllMetaRegionsOnlineException("Timed out; " + timeout + "ms");
-    }
-
-    return sn;
-  }
-
-  /**
-   * Sets the location of <code>hbase:meta</code> in ZooKeeper to the
-   * specified server address.
-   * @param zookeeper zookeeper reference
-   * @param serverName The server hosting <code>hbase:meta</code>
-   * @param state The region transition state
-   * @throws KeeperException unexpected zookeeper exception
-   */
-  public static void setMetaLocation(ZKWatcher zookeeper,
-      ServerName serverName, RegionState.State state) throws KeeperException {
-    setMetaLocation(zookeeper, serverName, RegionInfo.DEFAULT_REPLICA_ID, state);
-  }
-
-  /**
    * Sets the location of <code>hbase:meta</code> in ZooKeeper to the specified server address.
    * @param zookeeper reference to the {@link ZKWatcher} which also contains configuration and
-   *                  operation
+   *          operation
    * @param serverName the name of the server
    * @param replicaId the ID of the replica
    * @param state the state of the region
    * @throws KeeperException if a ZooKeeper operation fails
    */
   public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, int replicaId,
-      RegionState.State state) throws KeeperException {
+    RegionState.State state) throws KeeperException {
     if (serverName == null) {
       LOG.warn("Tried to set null ServerName in hbase:meta; skipping -- ServerName required");
       return;
@@ -226,15 +70,13 @@ public final class MetaTableLocator {
       serverName, state);
     // Make the MetaRegionServer pb and then get its bytes and save this as
     // the znode content.
-    MetaRegionServer pbrsr = MetaRegionServer.newBuilder()
-      .setServer(ProtobufUtil.toServerName(serverName))
-      .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
-      .setState(state.convert()).build();
+    MetaRegionServer pbrsr =
+      MetaRegionServer.newBuilder().setServer(ProtobufUtil.toServerName(serverName))
+        .setRpcVersion(HConstants.RPC_CURRENT_VERSION).setState(state.convert()).build();
     byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
     try {
-      ZKUtil.setData(zookeeper,
-          zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data);
-    } catch(KeeperException.NoNodeException nne) {
+      ZKUtil.setData(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data);
+    } catch (KeeperException.NoNodeException nne) {
       if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) {
         LOG.debug("hbase:meta region location doesn't exist, create it");
       } else {
@@ -242,27 +84,19 @@ public final class MetaTableLocator {
             ", create it");
       }
       ZKUtil.createAndWatch(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId),
-              data);
+        data);
     }
   }
 
   /**
-   * Load the meta region state from the meta server ZNode.
-   */
-  public static RegionState getMetaRegionState(ZKWatcher zkw) throws KeeperException {
-    return getMetaRegionState(zkw, RegionInfo.DEFAULT_REPLICA_ID);
-  }
-
-  /**
    * Load the meta region state from the meta region server ZNode.
-   *
    * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
    * @param replicaId the ID of the replica
    * @return regionstate
    * @throws KeeperException if a ZooKeeper operation fails
    */
   public static RegionState getMetaRegionState(ZKWatcher zkw, int replicaId)
-      throws KeeperException {
+    throws KeeperException {
     RegionState regionState = null;
     try {
       byte[] data = ZKUtil.getData(zkw, zkw.getZNodePaths().getZNodeForReplica(replicaId));
@@ -274,110 +108,4 @@ public final class MetaTableLocator {
     }
     return regionState;
   }
-
-  /**
-   * Deletes the location of <code>hbase:meta</code> in ZooKeeper.
-   * @param zookeeper zookeeper reference
-   * @throws KeeperException unexpected zookeeper exception
-   */
-  public static void deleteMetaLocation(ZKWatcher zookeeper)
-    throws KeeperException {
-    deleteMetaLocation(zookeeper, RegionInfo.DEFAULT_REPLICA_ID);
-  }
-
-  public static void deleteMetaLocation(ZKWatcher zookeeper, int replicaId)
-    throws KeeperException {
-    if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) {
-      LOG.info("Deleting hbase:meta region location in ZooKeeper");
-    } else {
-      LOG.info("Deleting hbase:meta for {} region location in ZooKeeper", replicaId);
-    }
-    try {
-      // Just delete the node.  Don't need any watches.
-      ZKUtil.deleteNode(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId));
-    } catch(KeeperException.NoNodeException nne) {
-      // Has already been deleted
-    }
-  }
-  /**
-   * Wait until the primary meta region is available. Get the secondary locations as well but don't
-   * block for those.
-   *
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param timeout maximum time to wait in millis
-   * @param conf the {@link Configuration} to use
-   * @return ServerName or null if we timed out.
-   * @throws InterruptedException if waiting for the socket operation fails
-   */
-  public static List<ServerName> blockUntilAvailable(final ZKWatcher zkw, final long timeout,
-      Configuration conf) throws InterruptedException {
-    int numReplicasConfigured = 1;
-
-    List<ServerName> servers = new ArrayList<>();
-    // Make the blocking call first so that we do the wait to know
-    // the znodes are all in place or timeout.
-    ServerName server = blockUntilAvailable(zkw, timeout);
-
-    if (server == null) {
-      return null;
-    }
-
-    servers.add(server);
-
-    try {
-      List<String> metaReplicaNodes = zkw.getMetaReplicaNodes();
-      numReplicasConfigured = metaReplicaNodes.size();
-    } catch (KeeperException e) {
-      LOG.warn("Got ZK exception {}", e);
-    }
-    for (int replicaId = 1; replicaId < numReplicasConfigured; replicaId++) {
-      // return all replica locations for the meta
-      servers.add(getMetaRegionLocation(zkw, replicaId));
-    }
-    return servers;
-  }
-
-  /**
-   * Wait until the meta region is available and is not in transition.
-   * @param zkw zookeeper connection to use
-   * @param timeout maximum time to wait, in millis
-   * @return ServerName or null if we timed out.
-   * @throws InterruptedException if waiting for the socket operation fails
-   */
-  public static ServerName blockUntilAvailable(final ZKWatcher zkw, final long timeout)
-      throws InterruptedException {
-    return blockUntilAvailable(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout);
-  }
-
-  /**
-   * Wait until the meta region is available and is not in transition.
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and constants
-   * @param replicaId the ID of the replica
-   * @param timeout maximum time to wait in millis
-   * @return ServerName or null if we timed out.
-   * @throws InterruptedException if waiting for the socket operation fails
-   */
-  public static ServerName blockUntilAvailable(final ZKWatcher zkw, int replicaId,
-      final long timeout) throws InterruptedException {
-    if (timeout < 0) {
-      throw new IllegalArgumentException();
-    }
-
-    if (zkw == null) {
-      throw new IllegalArgumentException();
-    }
-
-    long startTime = EnvironmentEdgeManager.currentTime();
-    ServerName sn = null;
-    while (true) {
-      sn = getMetaRegionLocation(zkw, replicaId);
-      if (sn != null ||
-        (EnvironmentEdgeManager.currentTime() - startTime) >
-          timeout - HConstants.SOCKET_RETRY_WAIT_MS) {
-        break;
-      }
-      Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
-    }
-    return sn;
-  }
 }
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index d124a9a..5bfb8bf 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -1857,13 +1857,6 @@ public final class ZKUtil {
           sb.append("\n ").append(child);
         }
       }
-      sb.append("\nRegion server holding hbase:meta:");
-      sb.append("\n ").append(MetaTableLocator.getMetaRegionLocation(zkw));
-      int numMetaReplicas = zkw.getMetaReplicaNodes().size();
-      for (int i = 1; i < numMetaReplicas; i++) {
-        sb.append("\n replica" + i + ": "
-          + MetaTableLocator.getMetaRegionLocation(zkw, i));
-      }
       sb.append("\nRegion servers:");
       final List<String> rsChildrenNoWatchList =
               listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode);

[hbase] 09/09: HBASE-25013 Avoid reset the backup master root cache every time when syncing (#2392)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit b6c54e5cc4df5a722210d30232f6a78053171c52
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Mon Sep 21 20:32:15 2020 +0800

    HBASE-25013 Avoid reset the backup master root cache every time when syncing (#2392)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../hadoop/hbase/ClientMetaTableAccessor.java      |  23 ++++
 .../hadoop/hbase/client/ConnectionUtils.java       |  39 +-----
 .../apache/hadoop/hbase/client/MasterRegistry.java |  11 +-
 .../hadoop/hbase/client/ZKConnectionRegistry.java  |  25 +++-
 .../src/main/protobuf/server/master/Master.proto   |  14 ++
 .../org/apache/hadoop/hbase/MetaTableAccessor.java |  21 +--
 .../hbase/client/AsyncClusterConnection.java       |   3 +-
 .../hbase/client/AsyncClusterConnectionImpl.java   |  52 +++++--
 .../hadoop/hbase/coprocessor/MasterObserver.java   |  20 +++
 .../org/apache/hadoop/hbase/master/HMaster.java    |  69 +++-------
 .../hadoop/hbase/master/MasterCoprocessorHost.java |  19 +++
 .../hadoop/hbase/master/MasterRpcServices.java     |  62 +++++++--
 .../hadoop/hbase/master/MetaLocationCache.java     |  42 ++++--
 .../hbase/master/assignment/AssignmentManager.java |  33 ++---
 .../hbase/master/assignment/RegionStateStore.java  | 128 +++++++----------
 .../hadoop/hbase/master/region/MasterRegion.java   |   8 ++
 .../region/RegionScannerAsResultScanner.java       |  88 ++++++++++++
 .../hadoop/hbase/master/region/RootStore.java      | 153 +++++++++++++++++++++
 .../hbase/client/DummyAsyncClusterConnection.java  |   5 +-
 .../client/TestFailedMetaReplicaAssigment.java     |  10 +-
 .../hbase/master/TestBackupMasterSyncRoot.java     | 113 +++++++++++++++
 .../hbase/master/TestCloseAnOpeningRegion.java     |   6 +-
 .../hbase/master/TestClusterRestartFailover.java   |  10 +-
 .../hadoop/hbase/master/TestMetaLocationCache.java |  12 +-
 .../master/assignment/MockMasterServices.java      |   8 +-
 .../assignment/TestOpenRegionProcedureBackoff.java |  10 +-
 .../assignment/TestOpenRegionProcedureHang.java    |  10 +-
 .../assignment/TestRaceBetweenSCPAndDTP.java       |  10 +-
 .../assignment/TestRaceBetweenSCPAndTRSP.java      |  10 +-
 .../TestRegionAssignedToMultipleRegionServers.java |  10 +-
 .../assignment/TestReportOnlineRegionsRace.java    |  10 +-
 ...tReportRegionStateTransitionFromDeadServer.java |  10 +-
 .../TestReportRegionStateTransitionRetry.java      |  10 +-
 .../master/assignment/TestSCPGetRegionsRace.java   |  10 +-
 .../assignment/TestWakeUpUnexpectedProcedure.java  |  10 +-
 .../apache/hadoop/hbase/util/TestRegionMover2.java |  13 +-
 36 files changed, 768 insertions(+), 319 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
index ed0d9b4..f19c913 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableState;
@@ -486,4 +487,26 @@ public final class ClientMetaTableAccessor {
     }
     return stopRow;
   }
+
+  /**
+   * Visit all the result of the given {@code scanner}.
+   * <p/>
+   * It is the caller's duty to close the {@code scanner}.
+   * @param maxRows maximum rows to visit, or -1 means unlimited.
+   */
+  public static void visit(ResultScanner scanner, Visitor visitor, int maxRows) throws IOException {
+    for (int rows = 0;;) {
+      Result result = scanner.next();
+      if (result == null) {
+        return;
+      }
+      if (!visitor.visit(result)) {
+        return;
+      }
+      rows++;
+      if (maxRows > 0 && rows >= maxRows) {
+        return;
+      }
+    }
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 4697153..4fa08c6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -37,7 +37,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
 import java.util.function.Supplier;
-import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
@@ -52,7 +51,6 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcClient;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.security.User;
@@ -76,8 +74,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsRequest;
 
 /**
  * Utility used by client connections.
@@ -641,8 +637,8 @@ public final class ConnectionUtils {
     }
   }
 
-  public static void tryClearMasterStubCache(IOException error,
-    ClientMetaService.Interface currentStub, AtomicReference<ClientMetaService.Interface> stub) {
+  public static <T> void tryClearMasterStubCache(IOException error,
+    T currentStub, AtomicReference<T> stub) {
     if (ClientExceptionsUtil.isConnectionException(error) ||
       error instanceof ServerNotRunningYetException) {
       stub.compareAndSet(currentStub, null);
@@ -712,35 +708,4 @@ public final class ConnectionUtils {
       }
     }
   }
-
-  public static CompletableFuture<List<HRegionLocation>> getAllMetaRegionLocations(
-    boolean excludeOfflinedSplitParents,
-    CompletableFuture<ClientMetaService.Interface> getStubFuture,
-    AtomicReference<ClientMetaService.Interface> stubRef,
-    RpcControllerFactory rpcControllerFactory, int callTimeoutMs) {
-    CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
-    addListener(getStubFuture, (stub, error) -> {
-      if (error != null) {
-        future.completeExceptionally(error);
-        return;
-      }
-      HBaseRpcController controller = rpcControllerFactory.newController();
-      if (callTimeoutMs > 0) {
-        controller.setCallTimeout(callTimeoutMs);
-      }
-      stub.getAllMetaRegionLocations(controller, GetAllMetaRegionLocationsRequest.newBuilder()
-        .setExcludeOfflinedSplitParents(excludeOfflinedSplitParents).build(), resp -> {
-          if (controller.failed()) {
-            IOException ex = controller.getFailed();
-            tryClearMasterStubCache(ex, stub, stubRef);
-            future.completeExceptionally(ex);
-            return;
-          }
-          List<HRegionLocation> locs = resp.getMetaLocationsList().stream()
-            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList());
-          future.complete(locs);
-        });
-    });
-    return future;
-  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
index 3475f74..a517e2c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
@@ -369,8 +369,12 @@ public class MasterRegistry implements ConnectionRegistry {
     LocateMetaRegionRequest request =
       LocateMetaRegionRequest.newBuilder().setRow(ByteString.copyFrom(row))
         .setLocateType(ProtobufUtil.toProtoRegionLocateType(locateType)).build();
-    return this.<LocateMetaRegionResponse> call((c, s, d) -> s.locateMetaRegion(c, request, d),
-      r -> true, "locateMeta()").thenApply(this::transformRegionLocations);
+    return this
+      .<LocateMetaRegionResponse> call((c, s, d) -> s.locateMetaRegion(c, request, d),
+        r -> r.getMetaLocationsList().stream()
+          .anyMatch(l -> l.hasRegionInfo() && l.hasServerName()),
+        "locateMeta()")
+      .thenApply(this::transformRegionLocations);
   }
 
   private List<HRegionLocation>
@@ -386,7 +390,8 @@ public class MasterRegistry implements ConnectionRegistry {
       .setExcludeOfflinedSplitParents(excludeOfflinedSplitParents).build();
     return this
       .<GetAllMetaRegionLocationsResponse> call(
-        (c, s, d) -> s.getAllMetaRegionLocations(c, request, d), r -> true,
+        (c, s, d) -> s.getAllMetaRegionLocations(c, request, d),
+        r -> r.getMetaLocationsCount() > 0,
         "getAllMetaRegionLocations(" + excludeOfflinedSplitParents + ")")
       .thenApply(this::transformRegionLocationList);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
index 66f9684..b1ef5ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.tryClearMasterStubCache;
 import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
 import static org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.lengthOfPBMagic;
 import static org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture;
@@ -58,6 +59,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetAllMetaRegionLocationsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.LocateMetaRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
 
@@ -304,8 +306,27 @@ class ZKConnectionRegistry implements ConnectionRegistry {
   @Override
   public CompletableFuture<List<HRegionLocation>>
     getAllMetaRegionLocations(boolean excludeOfflinedSplitParents) {
-    return ConnectionUtils.getAllMetaRegionLocations(excludeOfflinedSplitParents, getStub(),
-      cachedStub, rpcControllerFactory, -1);
+    CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
+    addListener(getStub(), (stub, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      HBaseRpcController controller = rpcControllerFactory.newController();
+      stub.getAllMetaRegionLocations(controller, GetAllMetaRegionLocationsRequest.newBuilder()
+        .setExcludeOfflinedSplitParents(excludeOfflinedSplitParents).build(), resp -> {
+          if (controller.failed()) {
+            IOException ex = controller.getFailed();
+            tryClearMasterStubCache(ex, stub, cachedStub);
+            future.completeExceptionally(ex);
+            return;
+          }
+          List<HRegionLocation> locs = resp.getMetaLocationsList().stream()
+            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList());
+          future.complete(locs);
+        });
+    });
+    return future;
   }
 
   @Override
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
index 5302d51..153e004 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
@@ -1392,3 +1392,17 @@ service ClientMetaService {
   rpc GetAllMetaRegionLocations(GetAllMetaRegionLocationsRequest)
     returns(GetAllMetaRegionLocationsResponse);
 }
+
+message SyncRootRequest {
+  required int64 lastSyncSeqId = 1;
+}
+
+message SyncRootResponse {
+  required int64 lastModifiedSeqId = 1;
+  repeated RegionLocation meta_locations = 2;
+}
+
+service RootSyncService {
+  rpc SyncRoot(SyncRootRequest)
+    returns(SyncRootResponse);
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 385f2b9..8bf0fa0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -462,8 +462,7 @@ public final class MetaTableAccessor {
   public static void scanMeta(Connection connection, @Nullable final byte[] startRow,
     @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows,
     final ClientMetaTableAccessor.Visitor visitor) throws IOException {
-    int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE;
-    Scan scan = getMetaScan(connection.getConfiguration(), rowUpperLimit);
+    Scan scan = getMetaScan(connection.getConfiguration(), maxRows);
 
     for (byte[] family : type.getFamilies()) {
       scan.addFamily(family);
@@ -480,26 +479,12 @@ public final class MetaTableAccessor {
 
     if (LOG.isTraceEnabled()) {
       LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) +
-        " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit +
+        " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + maxRows +
         " with caching=" + scan.getCaching());
     }
-
-    int currentRow = 0;
     try (Table metaTable = getMetaHTable(connection)) {
       try (ResultScanner scanner = metaTable.getScanner(scan)) {
-        Result data;
-        while ((data = scanner.next()) != null) {
-          if (data.isEmpty()) {
-            continue;
-          }
-          // Break if visit returns false.
-          if (!visitor.visit(data)) {
-            break;
-          }
-          if (++currentRow >= rowUpperLimit) {
-            break;
-          }
-        }
+        ClientMetaTableAccessor.visit(scanner, visitor, maxRows);
       }
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index 8e64b4b..8b0f0a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -103,5 +103,6 @@ public interface AsyncClusterConnection extends AsyncConnection {
   /**
    * Fetch all meta region locations from active master, used by backup masters for caching.
    */
-  CompletableFuture<List<HRegionLocation>> getAllMetaRegionLocations(int callTimeoutMs);
+  CompletableFuture<Pair<Long, List<HRegionLocation>>> syncRoot(long lastSyncSeqId,
+    int callTimeoutMs);
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
index cfe62db..8fdd439 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
@@ -17,17 +17,23 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.client.ConnectionUtils.tryClearMasterStubCache;
+import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+
+import java.io.IOException;
 import java.net.SocketAddress;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
@@ -46,7 +52,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBul
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMetaService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RootSyncService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SyncRootRequest;
 
 /**
  * The implementation of AsyncClusterConnection.
@@ -54,11 +61,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClientMeta
 @InterfaceAudience.Private
 class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection {
 
-  private final AtomicReference<ClientMetaService.Interface> cachedClientMetaStub =
+  private final AtomicReference<RootSyncService.Interface> cachedRootSyncStub =
     new AtomicReference<>();
 
-  private final AtomicReference<CompletableFuture<ClientMetaService.Interface>>
-    clientMetaStubMakeFuture = new AtomicReference<>();
+  private final AtomicReference<CompletableFuture<RootSyncService.Interface>>
+    rootSyncStubMakeFuture = new AtomicReference<>();
 
   public AsyncClusterConnectionImpl(Configuration conf, ConnectionRegistry registry,
     String clusterId, SocketAddress localAddress, User user) {
@@ -143,15 +150,38 @@ class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClu
       .call();
   }
 
-  private CompletableFuture<ClientMetaService.Interface> getClientMetaStub() {
-    return ConnectionUtils.getMasterStub(registry, cachedClientMetaStub, clientMetaStubMakeFuture,
-      rpcClient, user, rpcTimeout, TimeUnit.MILLISECONDS, ClientMetaService::newStub,
-      "ClientMetaService");
+  private CompletableFuture<RootSyncService.Interface> getRootSyncStub() {
+    return ConnectionUtils.getMasterStub(registry, cachedRootSyncStub, rootSyncStubMakeFuture,
+      rpcClient, user, rpcTimeout, TimeUnit.MILLISECONDS, RootSyncService::newStub,
+      "RootSyncService");
   }
 
   @Override
-  public CompletableFuture<List<HRegionLocation>> getAllMetaRegionLocations(int callTimeoutMs) {
-    return ConnectionUtils.getAllMetaRegionLocations(false, getClientMetaStub(),
-      cachedClientMetaStub, rpcControllerFactory, callTimeoutMs);
+  public CompletableFuture<Pair<Long, List<HRegionLocation>>> syncRoot(long lastSyncSeqId,
+    int callTimeoutMs) {
+    CompletableFuture<Pair<Long, List<HRegionLocation>>> future = new CompletableFuture<>();
+    addListener(getRootSyncStub(), (stub, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      HBaseRpcController controller = rpcControllerFactory.newController();
+      if (callTimeoutMs > 0) {
+        controller.setCallTimeout(callTimeoutMs);
+      }
+      stub.syncRoot(controller,
+        SyncRootRequest.newBuilder().setLastSyncSeqId(lastSyncSeqId).build(), resp -> {
+          if (controller.failed()) {
+            IOException ex = controller.getFailed();
+            tryClearMasterStubCache(ex, stub, cachedRootSyncStub);
+            future.completeExceptionally(ex);
+            return;
+          }
+          List<HRegionLocation> locs = resp.getMetaLocationsList().stream()
+            .map(ProtobufUtil::toRegionLocation).collect(Collectors.toList());
+          future.complete(Pair.newPair(resp.getLastModifiedSeqId(), locs));
+        });
+    });
+    return future;
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 8ca8972..e6197ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1824,4 +1824,24 @@ public interface MasterObserver {
   default void postGetAllMetaRegionLocations(ObserverContext<MasterCoprocessorEnvironment> ctx,
     boolean excludeOfflinedSplitParents, List<HRegionLocation> locs) {
   }
+
+  /**
+   * Called before syncing root
+   * @param ctx the coprocessor instance's environment
+   * @param lastSyncSeqId the sequence id when we call sync root last time
+   */
+  default void preSyncRoot(ObserverContext<MasterCoprocessorEnvironment> ctx, long lastSyncSeqId) {
+  }
+
+  /**
+   * Called after syncing root
+   * @param ctx the coprocessor instance's environment
+   * @param lastSyncSeqId the sequence id when we call sync root last time
+   * @param lastModifiedSeqId the sequence id for this sync operation; if it is less than or
+   *          equal to {@code lastSyncSeqId}, it usually means nothing new was synced.
+   * @param locs the locations of all meta regions, including meta replicas if any.
+   */
+  default void postSyncRoot(ObserverContext<MasterCoprocessorEnvironment> ctx, long lastSyncSeqId,
+    long lastModifiedSeqId, List<HRegionLocation> locs) {
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 79a3f7f..3684c0e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.hbase.client.RegionLocateType;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
@@ -149,6 +150,7 @@ import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
 import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
 import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
 import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
 import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
@@ -411,7 +413,10 @@ public class HMaster extends HRegionServer implements MasterServices {
   private ProcedureStore procedureStore;
 
   // the master local storage to store procedure data, root table, etc.
-  private MasterRegion masterRegion;
+  MasterRegion masterRegion;
+
+  // a wrapper of MasterRegion to provide root table storage
+  private RootStore rootStore;
 
   // handle table states
   private TableStateManager tableStateManager;
@@ -795,8 +800,8 @@ public class HMaster extends HRegionServer implements MasterServices {
   // Will be overriden in test to inject customized AssignmentManager
   @InterfaceAudience.Private
   protected AssignmentManager createAssignmentManager(MasterServices master,
-    MasterRegion masterRegion) {
-    return new AssignmentManager(master, masterRegion);
+    RootStore rootStore) {
+    return new AssignmentManager(master, rootStore);
   }
 
   /**
@@ -959,6 +964,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     // initialize master local region
     masterRegion = MasterRegionFactory.create(this);
+    rootStore = new RootStore(masterRegion);
 
     tryMigrateRootTableFromZooKeeper();
 
@@ -972,7 +978,7 @@ public class HMaster extends HRegionServer implements MasterServices {
         .collect(Collectors.groupingBy(p -> p.getClass()));
 
     // Create Assignment Manager
-    this.assignmentManager = createAssignmentManager(this, masterRegion);
+    this.assignmentManager = createAssignmentManager(this, rootStore);
     this.assignmentManager.start();
     // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
     // completed, it could still be in the procedure list. This is a bit strange but is another
@@ -3982,16 +3988,12 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
     Scan scan =
       CatalogFamilyFormat.createRegionLocateScan(TableName.META_TABLE_NAME, row, locateType, 1);
-    try (RegionScanner scanner = masterRegion.getScanner(scan)) {
-      boolean moreRows;
-      List<Cell> cells = new ArrayList<>();
-      do {
-        moreRows = scanner.next(cells);
-        if (cells.isEmpty()) {
-          continue;
+    try (ResultScanner scanner = rootStore.getScanner(scan)) {
+      for (;;) {
+        Result result = scanner.next();
+        if (result == null) {
+          break;
         }
-        Result result = Result.create(cells);
-        cells.clear();
         RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
         if (locs == null || locs.getDefaultRegionLocation() == null) {
           LOG.warn("No location found when locating meta region with row='{}', locateType={}",
@@ -4009,7 +4011,7 @@ public class HMaster extends HRegionServer implements MasterServices {
           continue;
         }
         return locs;
-      } while (moreRows);
+      }
       LOG.warn("No location available when locating meta region with row='{}', locateType={}",
         Bytes.toStringBinary(row), locateType);
       return null;
@@ -4018,39 +4020,10 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   public List<RegionLocations> getAllMetaRegionLocations(boolean excludeOfflinedSplitParents)
     throws IOException {
-    Scan scan = new Scan().addFamily(HConstants.CATALOG_FAMILY);
-    List<RegionLocations> list = new ArrayList<>();
-    try (RegionScanner scanner = masterRegion.getScanner(scan)) {
-      boolean moreRows;
-      List<Cell> cells = new ArrayList<>();
-      do {
-        moreRows = scanner.next(cells);
-        if (cells.isEmpty()) {
-          continue;
-        }
-        Result result = Result.create(cells);
-        cells.clear();
-        RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
-        if (locs == null) {
-          LOG.warn("No locations in {}", result);
-          continue;
-        }
-        HRegionLocation loc = locs.getRegionLocation();
-        if (loc == null) {
-          LOG.warn("No non null location in {}", result);
-          continue;
-        }
-        RegionInfo info = loc.getRegion();
-        if (info == null) {
-          LOG.warn("No serialized RegionInfo in {}", result);
-          continue;
-        }
-        if (excludeOfflinedSplitParents && info.isSplitParent()) {
-          continue;
-        }
-        list.add(locs);
-      } while (moreRows);
-    }
-    return list;
+    return rootStore.getAllMetaRegionLocations(excludeOfflinedSplitParents);
+  }
+
+  public Pair<Long, List<RegionLocations>> syncRoot(long lastSyncSeqId) throws IOException {
+    return rootStore.sync(lastSyncSeqId);
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 728da5c..3289c9d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -2078,4 +2078,23 @@ public class MasterCoprocessorHost
       }
     });
   }
+
+  public void preSyncRoot(long lastSyncSeqId) throws IOException {
+    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+      @Override
+      public void call(MasterObserver observer) throws IOException {
+        observer.preSyncRoot(this, lastSyncSeqId);
+      }
+    });
+  }
+
+  public void postSyncRoot(long lastSyncSeqId, long lastModifiedSeqId, List<HRegionLocation> locs)
+    throws IOException {
+    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+      @Override
+      public void call(MasterObserver observer) throws IOException {
+        observer.postSyncRoot(this, lastSyncSeqId, lastModifiedSeqId, locs);
+      }
+    });
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index aa7dfab..92019a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -294,6 +294,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Recommissi
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RootSyncService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
@@ -329,6 +330,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExce
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SyncRootRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SyncRootResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
@@ -411,9 +414,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.
  */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
-public class MasterRpcServices extends RSRpcServices implements MasterService.BlockingInterface,
-  RegionServerStatusService.BlockingInterface, LockService.BlockingInterface,
-  HbckService.BlockingInterface, ClientMetaService.BlockingInterface {
+public class MasterRpcServices extends RSRpcServices
+  implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
+  LockService.BlockingInterface, HbckService.BlockingInterface, ClientMetaService.BlockingInterface,
+  RootSyncService.BlockingInterface {
 
   private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName());
   private static final Logger AUDITLOG =
@@ -546,7 +550,7 @@ public class MasterRpcServices extends RSRpcServices implements MasterService.Bl
    */
   @Override
   protected List<BlockingServiceAndInterface> getServices() {
-    List<BlockingServiceAndInterface> bssi = new ArrayList<>(5);
+    List<BlockingServiceAndInterface> bssi = new ArrayList<>();
     bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this),
       MasterService.BlockingInterface.class));
     bssi.add(
@@ -558,6 +562,8 @@ public class MasterRpcServices extends RSRpcServices implements MasterService.Bl
       HbckService.BlockingInterface.class));
     bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this),
       ClientMetaService.BlockingInterface.class));
+    bssi.add(new BlockingServiceAndInterface(RootSyncService.newReflectiveBlockingService(this),
+      RootSyncService.BlockingInterface.class));
     bssi.addAll(super.getServices());
     return bssi;
   }
@@ -3502,6 +3508,18 @@ public class MasterRpcServices extends RSRpcServices implements MasterService.Bl
     }
   }
 
+  private static List<HRegionLocation> locs2Loc(List<RegionLocations> locs) {
+    List<HRegionLocation> list = new ArrayList<>();
+    for (RegionLocations ls : locs) {
+      for (HRegionLocation loc : ls) {
+        if (loc != null) {
+          list.add(loc);
+        }
+      }
+    }
+    return list;
+  }
+
   @Override
   public GetAllMetaRegionLocationsResponse getAllMetaRegionLocations(RpcController controller,
     GetAllMetaRegionLocationsRequest request) throws ServiceException {
@@ -3516,16 +3534,7 @@ public class MasterRpcServices extends RSRpcServices implements MasterService.Bl
         list = cache.getAllMetaRegionLocations(excludeOfflinedSplitParents);
       } else {
         List<RegionLocations> locs = master.getAllMetaRegionLocations(excludeOfflinedSplitParents);
-        list = new ArrayList<>();
-        if (locs != null) {
-          for (RegionLocations ls : locs) {
-            for (HRegionLocation loc : ls) {
-              if (loc != null) {
-                list.add(loc);
-              }
-            }
-          }
-        }
+        list = locs2Loc(locs);
       }
       GetAllMetaRegionLocationsResponse.Builder builder =
         GetAllMetaRegionLocationsResponse.newBuilder();
@@ -3541,4 +3550,29 @@ public class MasterRpcServices extends RSRpcServices implements MasterService.Bl
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public SyncRootResponse syncRoot(RpcController controller, SyncRootRequest request)
+    throws ServiceException {
+    long lastSyncSeqId = request.getLastSyncSeqId();
+    try {
+      master.checkServiceStarted();
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preSyncRoot(lastSyncSeqId);
+      }
+      Pair<Long, List<RegionLocations>> pair = master.syncRoot(lastSyncSeqId);
+      List<HRegionLocation> locs = locs2Loc(pair.getSecond());
+      SyncRootResponse.Builder builder = SyncRootResponse.newBuilder();
+      builder.setLastModifiedSeqId(pair.getFirst());
+      for (HRegionLocation loc : locs) {
+        builder.addMetaLocations(ProtobufUtil.toRegionLocation(loc));
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postSyncRoot(lastSyncSeqId, pair.getFirst(), locs);
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaLocationCache.java
index ecf3323..f79a8c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaLocationCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaLocationCache.java
@@ -30,7 +30,9 @@ import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaCellComparator;
 import org.apache.hadoop.hbase.RegionLocations;
@@ -48,7 +50,7 @@ import org.slf4j.LoggerFactory;
  * A cache of meta region locations.
  */
 @InterfaceAudience.Private
-class MetaLocationCache implements Stoppable {
+public class MetaLocationCache implements Stoppable {
 
   private static final Logger LOG = LoggerFactory.getLogger(MetaLocationCache.class);
 
@@ -64,13 +66,16 @@ class MetaLocationCache implements Stoppable {
   // default timeout 1 second
   private static final int DEFAULT_FETCH_TIMEOUT_MS = 1000;
 
-  private static final class CacheHolder {
+  static final class CacheHolder {
+
+    final long lastSyncSeqId;
 
     final NavigableMap<byte[], RegionLocations> cache;
 
     final List<HRegionLocation> all;
 
-    CacheHolder(List<HRegionLocation> all) {
+    CacheHolder(long lastSyncSeqId, List<HRegionLocation> all) {
+      this.lastSyncSeqId = lastSyncSeqId;
       this.all = Collections.unmodifiableList(all);
       NavigableMap<byte[], SortedSet<HRegionLocation>> startKeyToLocs =
         new TreeMap<>(MetaCellComparator.ROW_COMPARATOR);
@@ -89,7 +94,9 @@ class MetaLocationCache implements Stoppable {
     }
   }
 
-  private volatile CacheHolder holder;
+  final AtomicReference<CacheHolder> holder = new AtomicReference<>();
+
+  private final ScheduledChore refreshChore;
 
   private volatile boolean stopped = false;
 
@@ -98,34 +105,42 @@ class MetaLocationCache implements Stoppable {
       master.getConfiguration().getInt(SYNC_INTERVAL_SECONDS, DEFAULT_SYNC_INTERVAL_SECONDS);
     int fetchTimeoutMs =
       master.getConfiguration().getInt(FETCH_TIMEOUT_MS, DEFAULT_FETCH_TIMEOUT_MS);
-    master.getChoreService().scheduleChore(new ScheduledChore(
-      getClass().getSimpleName() + "-Sync-Chore", this, syncIntervalSeconds, 0, TimeUnit.SECONDS) {
+    refreshChore = new ScheduledChore(getClass().getSimpleName() + "-Sync-Chore", this,
+      syncIntervalSeconds, 0, TimeUnit.SECONDS) {
 
       @Override
       protected void chore() {
         AsyncClusterConnection conn = master.getAsyncClusterConnection();
         if (conn != null) {
-          addListener(conn.getAllMetaRegionLocations(fetchTimeoutMs), (locs, error) -> {
+          final CacheHolder ch = holder.get();
+          long lastSyncSeqId = ch != null ? ch.lastSyncSeqId : HConstants.NO_SEQNUM;
+          addListener(conn.syncRoot(lastSyncSeqId, fetchTimeoutMs), (resp, error) -> {
             if (error != null) {
-              LOG.warn("Failed to fetch all meta region locations from active master", error);
+              LOG.warn("Failed to sync root data from active master", error);
               return;
             }
-            holder = new CacheHolder(locs);
+            long lastModifiedSeqId = resp.getFirst().longValue();
+            if (ch == null || lastModifiedSeqId > ch.lastSyncSeqId && holder.get() == ch) {
+              // since we may trigger cache refresh when locating, here we use CAS to avoid race
+              holder.compareAndSet(ch, new CacheHolder(lastModifiedSeqId, resp.getSecond()));
+            }
           });
         }
       }
-    });
+    };
+    master.getChoreService().scheduleChore(refreshChore);
   }
 
-  RegionLocations locateMeta(byte[] row, RegionLocateType locateType) {
+  public RegionLocations locateMeta(byte[] row, RegionLocateType locateType) {
     if (locateType == RegionLocateType.AFTER) {
       // as we know the exact row after us, so we can just create the new row, and use the same
       // algorithm to locate it.
       row = Arrays.copyOf(row, row.length + 1);
       locateType = RegionLocateType.CURRENT;
     }
-    CacheHolder holder = this.holder;
+    CacheHolder holder = this.holder.get();
     if (holder == null) {
+      refreshChore.triggerNow();
       return null;
     }
     return locateType.equals(RegionLocateType.BEFORE) ?
@@ -134,8 +149,9 @@ class MetaLocationCache implements Stoppable {
   }
 
   List<HRegionLocation> getAllMetaRegionLocations(boolean excludeOfflinedSplitParents) {
-    CacheHolder holder = this.holder;
+    CacheHolder holder = this.holder.get();
     if (holder == null) {
+      refreshChore.triggerNow();
       return Collections.emptyList();
     }
     if (!excludeOfflinedSplitParents) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index fdaa460..87554bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -34,7 +34,6 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CatalogFamilyFormat;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
@@ -67,13 +67,12 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
 import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.SequenceId;
 import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -195,21 +194,21 @@ public class AssignmentManager {
   private final int assignMaxAttempts;
   private final int assignRetryImmediatelyMaxAttempts;
 
-  private final MasterRegion masterRegion;
+  private final RootStore rootStore;
 
   private final Object checkIfShouldMoveSystemRegionLock = new Object();
 
   private Thread assignThread;
 
-  public AssignmentManager(MasterServices master, MasterRegion masterRegion) {
-    this(master, masterRegion, new RegionStateStore(master, masterRegion));
+  public AssignmentManager(MasterServices master, RootStore rootStore) {
+    this(master, rootStore, new RegionStateStore(master, rootStore));
   }
 
-  AssignmentManager(MasterServices master, MasterRegion masterRegion, RegionStateStore stateStore) {
+  AssignmentManager(MasterServices master, RootStore rootStore, RegionStateStore stateStore) {
     this.master = master;
     this.regionStateStore = stateStore;
     this.metrics = new MetricsAssignmentManager();
-    this.masterRegion = masterRegion;
+    this.rootStore = rootStore;
 
     final Configuration conf = master.getConfiguration();
 
@@ -252,17 +251,13 @@ public class AssignmentManager {
     // Start the Assignment Thread
     startAssignmentThread();
     // load meta region states.
-    try (RegionScanner scanner =
-      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
-      List<Cell> cells = new ArrayList<>();
-      boolean moreRows;
-      do {
-        moreRows = scanner.next(cells);
-        if (cells.isEmpty()) {
-          continue;
+    try (ResultScanner scanner =
+      rootStore.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
+      for(;;) {
+        Result result = scanner.next();
+        if (result == null) {
+          break;
         }
-        Result result = Result.create(cells);
-        cells.clear();
         RegionStateStore
           .visitMetaEntry((r, regionInfo, state, regionLocation, lastHost, openSeqNum) -> {
             RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionInfo);
@@ -287,7 +282,7 @@ public class AssignmentManager {
             }
             LOG.debug("Loaded hbase:meta {}", regionNode);
           }, result);
-      } while (moreRows);
+      }
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 2ffd232..01445d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -56,16 +56,17 @@ import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.wal.WALSplitUtil;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -92,11 +93,11 @@ public class RegionStateStore {
 
   private final MasterServices master;
 
-  private final MasterRegion masterRegion;
+  private final RootStore rootStore;
 
-  public RegionStateStore(MasterServices master, MasterRegion masterRegion) {
+  public RegionStateStore(MasterServices master, RootStore rootStore) {
     this.master = master;
-    this.masterRegion = masterRegion;
+    this.rootStore = rootStore;
   }
 
   @FunctionalInterface
@@ -229,21 +230,9 @@ public class RegionStateStore {
     // scan meta first
     MetaTableAccessor.fullScanRegions(master.getConnection(), visitor);
     // scan root
-    try (RegionScanner scanner =
-      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
-      boolean moreRows;
-      List<Cell> cells = new ArrayList<>();
-      do {
-        moreRows = scanner.next(cells);
-        if (cells.isEmpty()) {
-          continue;
-        }
-        Result result = Result.create(cells);
-        cells.clear();
-        if (!visitor.visit(result)) {
-          break;
-        }
-      } while (moreRows);
+    try (ResultScanner scanner =
+      rootStore.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
+      ClientMetaTableAccessor.visit(scanner, visitor, -1);
     }
   }
 
@@ -261,7 +250,7 @@ public class RegionStateStore {
     throws IOException {
     try {
       if (regionInfo.isMetaRegion()) {
-        masterRegion.update(r -> r.put(put));
+        rootStore.put(put);
       } else {
         try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
           table.put(put);
@@ -292,11 +281,7 @@ public class RegionStateStore {
   private void multiMutate(RegionInfo ri, List<Mutation> mutations) throws IOException {
     debugLogMutations(mutations);
     if (ri.isMetaRegion()) {
-      masterRegion.update(region -> {
-        List<byte[]> rowsToLock =
-          mutations.stream().map(Mutation::getRow).collect(Collectors.toList());
-        region.mutateRowsWithLocks(mutations, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE);
-      });
+      rootStore.multiMutate(mutations);
     } else {
       byte[] row =
         Bytes.toBytes(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionNameAsString() +
@@ -333,7 +318,7 @@ public class RegionStateStore {
     Get get =
       new Get(CatalogFamilyFormat.getMetaKeyForRegion(region)).addFamily(HConstants.CATALOG_FAMILY);
     if (region.isMetaRegion()) {
-      return masterRegion.get(get);
+      return rootStore.get(get);
     } else {
       try (Table table = getMetaTable()) {
         return table.get(get);
@@ -495,7 +480,7 @@ public class RegionStateStore {
     }
     debugLogMutation(delete);
     if (mergeRegion.isMetaRegion()) {
-      masterRegion.update(r -> r.delete(delete));
+      rootStore.delete(delete);
     } else {
       try (Table table = getMetaTable()) {
         table.delete(delete);
@@ -565,9 +550,7 @@ public class RegionStateStore {
     if (!metaRegions.isEmpty()) {
       List<Delete> deletes = makeDeleteRegionInfos(metaRegions, ts);
       debugLogMutations(deletes);
-      for (Delete d : deletes) {
-        masterRegion.update(r -> r.delete(d));
-      }
+      rootStore.delete(deletes);
       LOG.info("Deleted {} regions from ROOT", metaRegions.size());
       LOG.debug("Deleted regions: {}", metaRegions);
     }
@@ -610,64 +593,57 @@ public class RegionStateStore {
       .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
   }
 
-  private Delete deleteRegionReplicas(Result result, int oldReplicaCount, int newReplicaCount,
-    long now) {
-    RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result);
-    if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) {
-      return null;
-    }
-    Delete delete = new Delete(result.getRow());
-    for (int i = newReplicaCount; i < oldReplicaCount; i++) {
-      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), now);
-      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), now);
-      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), now);
-      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), now);
-      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getRegionStateColumn(i),
-        now);
+  private List<Delete> deleteRegionReplicas(ResultScanner scanner, int oldReplicaCount,
+    int newReplicaCount, long now) throws IOException {
+    List<Delete> deletes = new ArrayList<>();
+    for (;;) {
+      Result result = scanner.next();
+      if (result == null) {
+        break;
+      }
+      RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result);
+      if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) {
+        continue;
+      }
+      Delete delete = new Delete(result.getRow());
+      for (int i = newReplicaCount; i < oldReplicaCount; i++) {
+        delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), now);
+        delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), now);
+        delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i),
+          now);
+        delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i),
+          now);
+        delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getRegionStateColumn(i),
+          now);
+      }
+      deletes.add(delete);
     }
-    return delete;
+    return deletes;
   }
 
   public void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
     throws IOException {
     Scan scan = getScanForUpdateRegionReplicas(tableName);
-    List<Delete> deletes = new ArrayList<>();
     long now = EnvironmentEdgeManager.currentTime();
     if (TableName.isMetaTableName(tableName)) {
-      try (RegionScanner scanner = masterRegion.getScanner(scan)) {
-        List<Cell> cells = new ArrayList<>();
-        boolean moreRows;
-        do {
-          cells.clear();
-          moreRows = scanner.next(cells);
-          if (cells.isEmpty()) {
-            continue;
-          }
-          Result result = Result.create(cells);
-          Delete delete = deleteRegionReplicas(result, oldReplicaCount, newReplicaCount, now);
-          if (delete != null) {
-            deletes.add(delete);
-          }
-        } while (moreRows);
+      List<Delete> deletes;
+      try (ResultScanner scanner = rootStore.getScanner(scan)) {
+        deletes = deleteRegionReplicas(scanner, oldReplicaCount, newReplicaCount, now);
       }
       debugLogMutations(deletes);
-      masterRegion.update(r -> {
-        for (Delete d : deletes) {
-          r.delete(d);
+      rootStore.delete(deletes);
+      // also delete the mirrored location on zk
+      ZKWatcher zk = master.getZooKeeper();
+      try {
+        for (int i = newReplicaCount; i < oldReplicaCount; i++) {
+          ZKUtil.deleteNode(zk, zk.getZNodePaths().getZNodeForReplica(i));
         }
-      });
+      } catch (KeeperException e) {
+        throw new IOException(e);
+      }
     } else {
       try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) {
-        for (;;) {
-          Result result = scanner.next();
-          if (result == null) {
-            break;
-          }
-          Delete delete = deleteRegionReplicas(result, oldReplicaCount, newReplicaCount, now);
-          if (delete != null) {
-            deletes.add(delete);
-          }
-        }
+        List<Delete> deletes = deleteRegionReplicas(scanner, oldReplicaCount, newReplicaCount, now);
         debugLogMutations(deletes);
         metaTable.delete(deletes);
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
index c1d9f47..2a35732 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
@@ -138,6 +138,14 @@ public final class MasterRegion {
     return region.getScanner(scan);
   }
 
+  WAL getWAL() {
+    return region.getWAL();
+  }
+
+  public long getReadPoint() {
+    return region.getMVCC().getReadPoint();
+  }
+
   public FlushResult flush(boolean force) throws IOException {
     return region.flush(force);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/RegionScannerAsResultScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/RegionScannerAsResultScanner.java
new file mode 100644
index 0000000..f7ac315
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/RegionScannerAsResultScanner.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.region;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Wrap a {@link RegionScanner} as a {@link ResultScanner}.
+ */
+@InterfaceAudience.Private
+class RegionScannerAsResultScanner implements ResultScanner {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RootStore.class);
+
+  private final RegionScanner scanner;
+
+  private boolean moreRows = true;
+
+  private final List<Cell> cells = new ArrayList<>();
+
+  public RegionScannerAsResultScanner(RegionScanner scanner) {
+    this.scanner = scanner;
+  }
+
+  @Override
+  public boolean renewLease() {
+    return true;
+  }
+
+  @Override
+  public Result next() throws IOException {
+    if (!moreRows) {
+      return null;
+    }
+    for (;;) {
+      moreRows = scanner.next(cells);
+      if (cells.isEmpty()) {
+        if (!moreRows) {
+          return null;
+        } else {
+          continue;
+        }
+      }
+      Result result = Result.create(cells);
+      cells.clear();
+      return result;
+    }
+  }
+
+  @Override
+  public ScanMetrics getScanMetrics() {
+    return null;
+  }
+
+  @Override
+  public void close() {
+    try {
+      scanner.close();
+    } catch (IOException e) {
+      LOG.warn("Failed to close scanner", e);
+    }
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/RootStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/RootStore.java
new file mode 100644
index 0000000..0607f9e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/RootStore.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.region;
+
+import static org.apache.hadoop.hbase.HConstants.NO_NONCE;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.util.AtomicUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A wrapper of {@link MasterRegion} to support root table storage.
+ */
+@InterfaceAudience.Private
+public class RootStore {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RootStore.class);
+
+  private final MasterRegion region;
+
+  private final AtomicLong lastModifiedSeqId = new AtomicLong(HConstants.NO_SEQNUM);
+
+  public RootStore(MasterRegion region) {
+    this.region = region;
+    lastModifiedSeqId.set(region.getReadPoint());
+    region.getWAL().registerWALActionsListener(new WALActionsListener() {
+
+      @Override
+      public void postAppend(long entryLen, long elapsedTimeMillis, WALKey logKey, WALEdit logEdit)
+        throws IOException {
+        for (byte[] family : logEdit.getFamilies()) {
+          // we only care about catalog family
+          if (!Bytes.equals(family, HConstants.CATALOG_FAMILY)) {
+            return;
+          }
+        }
+        AtomicUtils.updateMax(lastModifiedSeqId, logKey.getSequenceId());
+      }
+    });
+  }
+
+  public ResultScanner getScanner(Scan scan) throws IOException {
+    return new RegionScannerAsResultScanner(region.getScanner(scan));
+  }
+
+  public Result get(Get get) throws IOException {
+    return region.get(get);
+  }
+
+  public void put(Put put) throws IOException {
+    region.update(r -> r.put(put));
+  }
+
+  public void delete(Delete delete) throws IOException {
+    region.update(r -> r.delete(delete));
+  }
+
+  public void delete(List<Delete> deletes) throws IOException {
+    region.update(r -> {
+      for (Delete delete : deletes) {
+        r.delete(delete);
+      }
+    });
+  }
+
+  public void multiMutate(List<Mutation> mutations) throws IOException {
+    region.update(r -> {
+      List<byte[]> rowsToLock =
+        mutations.stream().map(Mutation::getRow).collect(Collectors.toList());
+      r.mutateRowsWithLocks(mutations, rowsToLock, NO_NONCE, NO_NONCE);
+    });
+  }
+
+  public List<RegionLocations> getAllMetaRegionLocations(boolean excludeOfflinedSplitParents)
+    throws IOException {
+    List<RegionLocations> list = new ArrayList<>();
+    try (ResultScanner scanner = getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
+      for (;;) {
+        Result result = scanner.next();
+        if (result == null) {
+          break;
+        }
+        RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
+        if (locs == null) {
+          LOG.warn("No locations in {}", result);
+          continue;
+        }
+        HRegionLocation loc = locs.getRegionLocation();
+        if (loc == null) {
+          LOG.warn("No non null location in {}", result);
+          continue;
+        }
+        RegionInfo info = loc.getRegion();
+        if (info == null) {
+          LOG.warn("No serialized RegionInfo in {}", result);
+          continue;
+        }
+        if (excludeOfflinedSplitParents && info.isSplitParent()) {
+          continue;
+        }
+        list.add(locs);
+      }
+    }
+    return list;
+  }
+
+  public Pair<Long, List<RegionLocations>> sync(long lastSyncSeqId) throws IOException {
+    long lastModSeqId = Math.min(lastModifiedSeqId.get(), region.getReadPoint());
+    if (lastModSeqId <= lastSyncSeqId) {
+      return Pair.newPair(lastSyncSeqId, Collections.emptyList());
+    }
+    return Pair.newPair(lastModSeqId, getAllMetaRegionLocations(false));
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
index 4385a5a..93e8aa5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
@@ -155,12 +155,13 @@ public class DummyAsyncClusterConnection implements AsyncClusterConnection {
   }
 
   @Override
-  public Connection toConnection() {
+  public CompletableFuture<Pair<Long, List<HRegionLocation>>> syncRoot(long lastSyncSeqId,
+    int callTimeoutMs) {
     return null;
   }
 
   @Override
-  public CompletableFuture<List<HRegionLocation>> getAllMetaRegionLocations(int callTimeoutMs) {
+  public Connection toConnection() {
     return null;
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
index 4409d29..fadee5d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
@@ -131,8 +131,8 @@ public class TestFailedMetaReplicaAssigment {
 
     @Override
     public AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new BrokenMasterMetaAssignmentManager(master, masterRegion);
+      RootStore rootStore) {
+      return new BrokenMasterMetaAssignmentManager(master, rootStore);
     }
   }
 
@@ -140,8 +140,8 @@ public class TestFailedMetaReplicaAssigment {
     MasterServices master;
 
     public BrokenMasterMetaAssignmentManager(final MasterServices master,
-      MasterRegion masterRegion) {
-      super(master, masterRegion);
+      RootStore rootStore) {
+      super(master, rootStore);
       this.master = master;
     }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestBackupMasterSyncRoot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestBackupMasterSyncRoot.java
new file mode 100644
index 0000000..51b8cf8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestBackupMasterSyncRoot.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.StartTestingClusterOption;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionLocateType;
+import org.apache.hadoop.hbase.master.MetaLocationCache.CacheHolder;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestBackupMasterSyncRoot {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestBackupMasterSyncRoot.class);
+
+  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.getConfiguration().setInt(MetaLocationCache.SYNC_INTERVAL_SECONDS, 1);
+    StartTestingClusterOption option =
+      StartTestingClusterOption.builder().numMasters(2).numRegionServers(3).build();
+    UTIL.startMiniCluster(option);
+    UTIL.getAdmin().balancerSwitch(false, true);
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testSync() throws Exception {
+    HMaster active = UTIL.getHBaseCluster().getMaster();
+    AssignmentManager activeAM = active.getAssignmentManager();
+    RegionInfo meta =
+      activeAM.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).get(0);
+    ServerName expected = activeAM.getRegionStates().getRegionStateNode(meta).getRegionLocation();
+    HMaster backup = UTIL.getHBaseCluster().getMasterThreads().stream().map(t -> t.getMaster())
+      .filter(h -> h != active).findFirst().get();
+    MetaLocationCache cache = backup.getMetaLocationCache();
+    UTIL.waitFor(10000, () -> {
+      RegionLocations loc = cache.locateMeta(HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT);
+      return loc != null && loc.getRegionLocation().getServerName().equals(expected);
+    });
+    CacheHolder currentHolder = cache.holder.get();
+    assertNotNull(currentHolder);
+    long lastSyncSeqId = currentHolder.lastSyncSeqId;
+    long currentMVCC = active.masterRegion.getReadPoint();
+    assertTrue(lastSyncSeqId <= currentMVCC);
+    TableName table = TableName.valueOf("test");
+    UTIL.createTable(table, Bytes.toBytes("f"));
+    UTIL.waitTableAvailable(table);
+    long newMVCC = active.masterRegion.getReadPoint();
+    // creating the table ran several new procedures, so the read point should have advanced
+    assertTrue(newMVCC > currentMVCC);
+    Thread.sleep(3000);
+    // the cached holder should be reused since the root family has not been modified
+    assertSame(currentHolder, cache.holder.get());
+
+    ServerName newExpected =
+      UTIL.getAdmin().getRegionServers().stream().filter(s -> !s.equals(expected)).findAny().get();
+    active.getAssignmentManager().moveAsync(new RegionPlan(meta, expected, newExpected)).get();
+    assertEquals(newExpected,
+      activeAM.getRegionStates().getRegionStateNode(meta).getRegionLocation());
+    UTIL.waitFor(10000, () -> {
+      RegionLocations loc = cache.locateMeta(HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT);
+      return loc != null && loc.getRegionLocation().getServerName().equals(newExpected);
+    });
+    CacheHolder newHolder = cache.holder.get();
+    // this time the cache should have been refreshed with the new meta location
+    assertNotSame(currentHolder, newHolder);
+    assertTrue(newHolder.lastSyncSeqId > newMVCC);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java
index 73ff415..5fdd451 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -71,8 +71,8 @@ public class TestCloseAnOpeningRegion {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManager(master, masterRegion) {
+      RootStore rootStore) {
+      return new AssignmentManager(master, rootStore) {
 
         @Override
         public ReportRegionStateTransitionResponse reportRegionStateTransition(
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java
index c224440..4449e9e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.ServerState;
 import org.apache.hadoop.hbase.master.assignment.ServerStateNode;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -167,15 +167,15 @@ public class TestClusterRestartFailover extends AbstractTestRestartCluster {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
   }
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaLocationCache.java
index 306767e..2e4d3af 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaLocationCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaLocationCache.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FutureUtils;
+import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -99,7 +101,7 @@ public class TestMetaLocationCache {
   @Test
   public void testError() throws InterruptedException {
     AsyncClusterConnection conn = mock(AsyncClusterConnection.class);
-    when(conn.getAllMetaRegionLocations(anyInt()))
+    when(conn.syncRoot(anyLong(), anyInt()))
       .thenReturn(FutureUtils.failedFuture(new RuntimeException("inject error")));
     when(master.getAsyncClusterConnection()).thenReturn(conn);
     Thread.sleep(2000);
@@ -109,8 +111,8 @@ public class TestMetaLocationCache {
     HRegionLocation loc =
       new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(),
         ServerName.valueOf("localhost", 12345, System.currentTimeMillis()));
-    when(conn.getAllMetaRegionLocations(anyInt()))
-      .thenReturn(CompletableFuture.completedFuture(Arrays.asList(loc)));
+    when(conn.syncRoot(anyLong(), anyInt()))
+      .thenReturn(CompletableFuture.completedFuture(Pair.newPair(1L, Arrays.asList(loc))));
     Thread.sleep(2000);
     List<HRegionLocation> list = cache.getAllMetaRegionLocations(false);
     assertEquals(1, list.size());
@@ -131,8 +133,8 @@ public class TestMetaLocationCache {
       ServerName.valueOf("127.0.0.2", 12345, System.currentTimeMillis()));
     HRegionLocation daughter2Loc = new HRegionLocation(daughter2,
       ServerName.valueOf("127.0.0.3", 12345, System.currentTimeMillis()));
-    when(conn.getAllMetaRegionLocations(anyInt())).thenReturn(
-      CompletableFuture.completedFuture(Arrays.asList(parentLoc, daughter1Loc, daughter2Loc)));
+    when(conn.syncRoot(anyLong(), anyInt())).thenReturn(CompletableFuture
+      .completedFuture(Pair.newPair(1L, Arrays.asList(parentLoc, daughter1Loc, daughter2Loc))));
     when(master.getAsyncClusterConnection()).thenReturn(conn);
     Thread.sleep(2000);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index d22040b..01cb474 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
 import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -111,9 +112,10 @@ public class MockMasterServices extends MockNoopMasterServices {
       conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)?
         null: new SplitWALManager(this);
     this.masterRegion = MasterRegionFactory.create(this);
+    RootStore rootStore = new RootStore(masterRegion);
     // Mock an AM.
     this.assignmentManager =
-      new AssignmentManager(this, masterRegion, new MockRegionStateStore(this, masterRegion));
+      new AssignmentManager(this, rootStore, new MockRegionStateStore(this, rootStore));
     this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
     this.serverManager = new ServerManager(this);
     this.tableStateManager = Mockito.mock(TableStateManager.class);
@@ -294,8 +296,8 @@ public class MockMasterServices extends MockNoopMasterServices {
   }
 
   private static class MockRegionStateStore extends RegionStateStore {
-    public MockRegionStateStore(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public MockRegionStateStore(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
index 4c0eac0..7e25c22 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -56,8 +56,8 @@ public class TestOpenRegionProcedureBackoff {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -77,8 +77,8 @@ public class TestOpenRegionProcedureBackoff {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java
index 93711ba..0016420 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -75,8 +75,8 @@ public class TestOpenRegionProcedureHang {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master,MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -112,8 +112,8 @@ public class TestOpenRegionProcedureHang {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java
index 21fb63e..b9e4573 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -70,8 +70,8 @@ public class TestRaceBetweenSCPAndDTP {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master,masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -97,8 +97,8 @@ public class TestRaceBetweenSCPAndDTP {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java
index ae21c75..cbcb2f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -70,8 +70,8 @@ public class TestRaceBetweenSCPAndTRSP {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -110,8 +110,8 @@ public class TestRaceBetweenSCPAndTRSP {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
index ad8ad7a..94d3a02 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -81,8 +81,8 @@ public class TestRegionAssignedToMultipleRegionServers {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -116,8 +116,8 @@ public class TestRegionAssignedToMultipleRegionServers {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java
index f17c09d..e60ae54 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -69,8 +69,8 @@ public class TestReportOnlineRegionsRace {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -112,8 +112,8 @@ public class TestReportOnlineRegionsRace {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
index a271f17..76156ff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -83,8 +83,8 @@ public class TestReportRegionStateTransitionFromDeadServer {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -123,8 +123,8 @@ public class TestReportRegionStateTransitionFromDeadServer {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java
index 1aa0f34..ac0dcdc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -61,8 +61,8 @@ public class TestReportRegionStateTransitionRetry {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -90,8 +90,8 @@ public class TestReportRegionStateTransitionRetry {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
index eeeeda6..932a4be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -90,8 +90,8 @@ public class TestSCPGetRegionsRace {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AssignmentManagerForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -136,8 +136,8 @@ public class TestSCPGetRegionsRace {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AssignmentManagerForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AssignmentManagerForTest(master, rootStore);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
index 0bc97fa..09520aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.RootStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -136,8 +136,8 @@ public class TestWakeUpUnexpectedProcedure {
 
   private static final class AMForTest extends AssignmentManager {
 
-    public AMForTest(MasterServices master, MasterRegion masterRegion) {
-      super(master, masterRegion);
+    public AMForTest(MasterServices master, RootStore rootStore) {
+      super(master, rootStore);
     }
 
     @Override
@@ -204,8 +204,8 @@ public class TestWakeUpUnexpectedProcedure {
 
     @Override
     protected AssignmentManager createAssignmentManager(MasterServices master,
-      MasterRegion masterRegion) {
-      return new AMForTest(master, masterRegion);
+      RootStore rootStore) {
+      return new AMForTest(master, rootStore);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java
index ecf3ef5..1a6750b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java
@@ -67,6 +67,11 @@ public class TestRegionMover2 {
 
   private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
 
+  // When moving a region, we first need to get the location of meta, which calls back into the
+  // master from inside a master rpc handler thread. This may cause a deadlock if we use more than
+  // 3 threads here, since we only have 3 rpc handlers for the master in UT.
+  private static final int MAX_THREADS = 2;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniCluster(3);
@@ -116,7 +121,7 @@ public class TestRegionMover2 {
       .collect(Collectors.toList());
     RegionMover.RegionMoverBuilder rmBuilder =
       new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true)
-        .maxthreads(8);
+        .maxthreads(MAX_THREADS);
     try (RegionMover rm = rmBuilder.build()) {
       LOG.debug("Unloading {}", regionServer.getServerName());
       rm.unload();
@@ -153,7 +158,7 @@ public class TestRegionMover2 {
 
     RegionMover.RegionMoverBuilder rmBuilder =
       new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true)
-        .maxthreads(8);
+        .maxthreads(MAX_THREADS);
     try (RegionMover rm = rmBuilder.build()) {
       LOG.debug("Unloading {}", regionServer.getServerName());
       rm.unload();
@@ -194,13 +199,13 @@ public class TestRegionMover2 {
     admin.flush(tableName);
     HRegionServer regionServer = cluster.getRegionServer(0);
     String rsName = regionServer.getServerName().getAddress().toString();
-    int numRegions = regionServer.getNumberOfOnlineRegions();
+    regionServer.getNumberOfOnlineRegions();
     List<HRegion> hRegions = regionServer.getRegions().stream()
       .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName))
       .collect(Collectors.toList());
     RegionMover.RegionMoverBuilder rmBuilder =
       new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true)
-        .maxthreads(8);
+        .maxthreads(MAX_THREADS);
     try (RegionMover rm = rmBuilder.build()) {
       LOG.debug("Unloading {}", regionServer.getServerName());
       rm.unload();

[hbase] 06/09: HBASE-24606 Implement meta merge (#2311)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d96c6107b22e61a7473ce7d8c31877367bfd3f94
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Wed Sep 9 12:38:32 2020 +0800

    HBASE-24606 Implement meta merge (#2311)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../hbase/master/assignment/AssignmentManager.java |  2 +-
 .../hbase/master/assignment/RegionStateStore.java  |  8 ++-
 ...etaSplit.java => TestSimpleMetaSplitMerge.java} | 73 +++++++++++++++++-----
 3 files changed, 65 insertions(+), 18 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index a625fb3..fdaa460 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1953,7 +1953,7 @@ public class AssignmentManager {
    * References removed).
    */
   public void markRegionAsMerged(final RegionInfo child, final ServerName serverName,
-        RegionInfo [] mergeParents)
+    RegionInfo[] mergeParents)
       throws IOException {
     final RegionStateNode node = regionStates.getOrCreateRegionStateNode(child);
     node.setState(State.MERGED);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index c290c5e..98e6f2e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -312,8 +312,12 @@ public class RegionStateStore {
   private Result getRegionCatalogResult(RegionInfo region) throws IOException {
     Get get =
       new Get(CatalogFamilyFormat.getMetaKeyForRegion(region)).addFamily(HConstants.CATALOG_FAMILY);
-    try (Table table = getMetaTable()) {
-      return table.get(get);
+    if (region.isMetaRegion()) {
+      return masterRegion.get(get);
+    } else {
+      try (Table table = getMetaTable()) {
+        return table.get(get);
+      }
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplitMerge.java
similarity index 59%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplit.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplitMerge.java
index 6d50f71..9eb263a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplitMerge.java
@@ -20,17 +20,23 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -38,11 +44,11 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category({ MiscTests.class, MediumTests.class })
-public class TestSimpleMetaSplit {
+public class TestSimpleMetaSplitMerge {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestSimpleMetaSplit.class);
+    HBaseClassTestRule.forClass(TestSimpleMetaSplitMerge.class);
 
   private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
 
@@ -70,29 +76,66 @@ public class TestSimpleMetaSplit {
     UTIL.shutdownMiniCluster();
   }
 
+  private void assertMetaRegionCount(int count) {
+    // Do not count regions from the client side, as doing so would reset the location cache for
+    // the meta table; read the count from the master's AssignmentManager instead.
+    assertEquals(count, UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
+      .getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size());
+  }
+
+  private void clearCache(TableName tableName) throws IOException {
+    try (RegionLocator locator = UTIL.getConnection().getRegionLocator(tableName)) {
+      locator.clearRegionLocationCache();
+    }
+  }
+
+  private void assertValue(TableName tableName, String row) throws IOException {
+    try (Table table = UTIL.getConnection().getTable(tableName)) {
+      Result result = table.get(new Get(Bytes.toBytes(row)));
+      assertEquals(row, Bytes.toString(result.getValue(CF, CQ)));
+    }
+  }
+
   @Test
-  public void test() throws IOException {
+  public void test() throws Exception {
     try (Table table = UTIL.getConnection().getTable(TD1.getTableName())) {
       table.put(new Put(Bytes.toBytes("row1")).addColumn(CF, CQ, Bytes.toBytes("row1")));
     }
     try (Table table = UTIL.getConnection().getTable(TD2.getTableName())) {
       table.put(new Put(Bytes.toBytes("row2")).addColumn(CF, CQ, Bytes.toBytes("row2")));
     }
+    Admin admin = UTIL.getAdmin();
     // split meta
-    UTIL.getAdmin().split(TableName.META_TABLE_NAME, Bytes.toBytes("b"));
-    // do not count it from client as it will reset the location cache for meta table
-    assertEquals(2, UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
-      .getRegionsOfTable(TableName.META_TABLE_NAME).size());
+    admin.split(TableName.META_TABLE_NAME, Bytes.toBytes("b"));
+    assertMetaRegionCount(2);
     // clear the cache for table 'b'
-    try (RegionLocator locator = UTIL.getConnection().getRegionLocator(TD2.getTableName())) {
-      locator.clearRegionLocationCache();
-    }
+    clearCache(TD2.getTableName());
     // make sure that we could get the location of the TD2 from the second meta region
-    try (Table table = UTIL.getConnection().getTable(TD2.getTableName())) {
-      Result result = table.get(new Get(Bytes.toBytes("row2")));
-      assertEquals("row2", Bytes.toString(result.getValue(CF, CQ)));
-    }
+    assertValue(TD2.getTableName(), "row2");
     // assert from client side
-    assertEquals(2, UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).size());
+    List<RegionInfo> regions = admin.getRegions(TableName.META_TABLE_NAME);
+    assertEquals(2, regions.size());
+    // compact to make sure we can merge
+    for (JVMClusterUtil.RegionServerThread t : UTIL.getMiniHBaseCluster()
+      .getRegionServerThreads()) {
+      for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
+        if (TableName.isMetaTableName(r.getRegionInfo().getTable())) {
+          r.compact(true);
+          for (HStore store : r.getStores()) {
+            store.closeAndArchiveCompactedFiles();
+          }
+        }
+      }
+    }
+    // merge the 2 regions back to 1
+    admin.mergeRegionsAsync(regions.stream().map(RegionInfo::getRegionName).toArray(byte[][]::new),
+      false).get();
+    assertMetaRegionCount(1);
+    // clear the cache for table 'a' and 'b'
+    clearCache(TD1.getTableName());
+    clearCache(TD2.getTableName());
+
+    // make sure that we could still get the locations from the new meta region
+    assertValue(TD2.getTableName(), "row2");
+    assertValue(TD1.getTableName(), "row1");
   }
 }

[hbase] 07/09: HBASE-24607 Implement CatalogJanitor for 'root table' (#2377)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 75ac29c638daccafe118841689b36a519dab392d
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Fri Sep 11 13:31:00 2020 +0800

    HBASE-24607 Implement CatalogJanitor for 'root table' (#2377)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../hadoop/hbase/ClientMetaTableAccessor.java      |  7 --
 .../org/apache/hadoop/hbase/MetaTableAccessor.java | 10 ---
 .../hbase/master/assignment/GCRegionProcedure.java |  8 +--
 .../hbase/master/assignment/RegionStateStore.java  | 78 ++++++++++++++++++----
 .../hbase/master/janitor/CatalogJanitor.java       |  4 +-
 .../hbase/master/janitor/ReportMakingVisitor.java  | 15 ++---
 .../hadoop/hbase/TestSimpleMetaSplitMerge.java     | 42 +++++++++---
 7 files changed, 103 insertions(+), 61 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
index 74d2322..ed0d9b4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase;
 
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -334,12 +333,6 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Implementations 'visit' a catalog table row but with close() at the end.
-   */
-  public interface CloseableVisitor extends Visitor, Closeable {
-  }
-
-  /**
    * A {@link Visitor} that collects content out of passed {@link Result}.
    */
   private static abstract class CollectingVisitor<T> implements Visitor {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 512916f..385f2b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase;
 
 import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -49,7 +48,6 @@ import org.apache.hadoop.hbase.filter.SubstringComparator;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -504,14 +502,6 @@ public final class MetaTableAccessor {
         }
       }
     }
-    if (visitor instanceof Closeable) {
-      try {
-        ((Closeable) visitor).close();
-      } catch (Throwable t) {
-        ExceptionUtil.rethrowIfInterrupt(t);
-        LOG.debug("Got exception in closing the meta scanner visitor", t);
-      }
-    }
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
index dfd2314..e21e51d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
@@ -109,12 +109,8 @@ public class GCRegionProcedure extends AbstractStateMachineRegionProcedure<GCReg
           // TODO: Purge metadata before removing from HDFS? This ordering is copied
           // from CatalogJanitor.
           AssignmentManager am = masterServices.getAssignmentManager();
-          if (am != null) {
-            if (am.getRegionStates() != null) {
-              am.getRegionStates().deleteRegion(getRegion());
-            }
-          }
-          env.getAssignmentManager().getRegionStateStore().deleteRegion(getRegion());
+          am.getRegionStates().deleteRegion(getRegion());
+          am.getRegionStateStore().deleteRegion(getRegion());
           masterServices.getServerManager().removeRegion(getRegion());
           FavoredNodesManager fnm = masterServices.getFavoredNodesManager();
           if (fnm != null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 98e6f2e..2ffd232 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -66,8 +66,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.wal.WALSplitUtil;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -227,6 +225,28 @@ public class RegionStateStore {
     }
   }
 
+  public void scanCatalog(ClientMetaTableAccessor.Visitor visitor) throws IOException {
+    // scan meta first
+    MetaTableAccessor.fullScanRegions(master.getConnection(), visitor);
+    // scan root
+    try (RegionScanner scanner =
+      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
+      boolean moreRows;
+      List<Cell> cells = new ArrayList<>();
+      do {
+        moreRows = scanner.next(cells);
+        if (cells.isEmpty()) {
+          continue;
+        }
+        Result result = Result.create(cells);
+        cells.clear();
+        if (!visitor.visit(result)) {
+          break;
+        }
+      } while (moreRows);
+    }
+  }
+
   public void mirrorMetaLocation(RegionInfo regionInfo, ServerName serverName, State state)
     throws IOException {
     try {
@@ -454,7 +474,7 @@ public class RegionStateStore {
     if (cells == null || cells.length == 0) {
       return;
     }
-    Delete delete = new Delete(mergeRegion.getRegionName());
+    Delete delete = new Delete(CatalogFamilyFormat.getMetaKeyForRegion(mergeRegion));
     List<byte[]> qualifiers = new ArrayList<>();
     for (Cell cell : cells) {
       if (!CatalogFamilyFormat.isMergeQualifierPrefix(cell)) {
@@ -473,8 +493,13 @@ public class RegionStateStore {
         " in meta table, they are cleaned up already, Skip.");
       return;
     }
-    try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
-      table.delete(delete);
+    debugLogMutation(delete);
+    if (mergeRegion.isMetaRegion()) {
+      masterRegion.update(r -> r.delete(delete));
+    } else {
+      try (Table table = getMetaTable()) {
+        table.delete(delete);
+      }
     }
     LOG.info("Deleted merge references in " + mergeRegion.getRegionNameAsString() +
       ", deleted qualifiers " +
@@ -517,19 +542,44 @@ public class RegionStateStore {
     deleteRegions(regions, EnvironmentEdgeManager.currentTime());
   }
 
+  private static Delete makeDeleteRegionInfo(RegionInfo regionInfo, long ts) {
+    return new Delete(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo))
+      .addFamily(HConstants.CATALOG_FAMILY, ts);
+  }
+
+  private static List<Delete> makeDeleteRegionInfos(List<RegionInfo> regionInfos, long ts) {
+    return regionInfos.stream().map(ri -> makeDeleteRegionInfo(ri, ts))
+      .collect(Collectors.toList());
+  }
+
   private void deleteRegions(List<RegionInfo> regions, long ts) throws IOException {
-    List<Delete> deletes = new ArrayList<>(regions.size());
-    for (RegionInfo hri : regions) {
-      Delete e = new Delete(hri.getRegionName());
-      e.addFamily(HConstants.CATALOG_FAMILY, ts);
-      deletes.add(e);
+    List<RegionInfo> metaRegions = new ArrayList<>();
+    List<RegionInfo> nonMetaRegions = new ArrayList<>();
+    for (RegionInfo region : regions) {
+      if (region.isMetaRegion()) {
+        metaRegions.add(region);
+      } else {
+        nonMetaRegions.add(region);
+      }
     }
-    try (Table table = getMetaTable()) {
+    if (!metaRegions.isEmpty()) {
+      List<Delete> deletes = makeDeleteRegionInfos(metaRegions, ts);
       debugLogMutations(deletes);
-      table.delete(deletes);
+      for (Delete d : deletes) {
+        masterRegion.update(r -> r.delete(d));
+      }
+      LOG.info("Deleted {} regions from ROOT", metaRegions.size());
+      LOG.debug("Deleted regions: {}", metaRegions);
+    }
+    if (!nonMetaRegions.isEmpty()) {
+      List<Delete> deletes = makeDeleteRegionInfos(nonMetaRegions, ts);
+      debugLogMutations(deletes);
+      try (Table table = getMetaTable()) {
+        table.delete(deletes);
+      }
+      LOG.info("Deleted {} regions from META", nonMetaRegions.size());
+      LOG.debug("Deleted regions: {}", nonMetaRegions);
     }
-    LOG.info("Deleted {} regions from META", regions.size());
-    LOG.debug("Deleted regions: {}", regions);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
index fee218e..b01e07d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
@@ -231,8 +231,8 @@ public class CatalogJanitor extends ScheduledChore {
   // will be override in tests.
   protected Report scanForReport() throws IOException {
     ReportMakingVisitor visitor = new ReportMakingVisitor(this.services);
-    // Null tablename means scan all of meta.
-    MetaTableAccessor.scanMetaForTableRegions(this.services.getConnection(), visitor, null);
+    services.getAssignmentManager().getRegionStateStore().scanCatalog(visitor);
+    visitor.done();
     return visitor.getReport();
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
index 4dd514e..77188de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
  * {@link #close()}'d.
  */
 @InterfaceAudience.Private
-class ReportMakingVisitor implements ClientMetaTableAccessor.CloseableVisitor {
+class ReportMakingVisitor implements ClientMetaTableAccessor.Visitor {
 
   private static final Logger LOG = LoggerFactory.getLogger(ReportMakingVisitor.class);
 
@@ -199,14 +199,8 @@ class ReportMakingVisitor implements ClientMetaTableAccessor.CloseableVisitor {
   /**
    * @return True if table is disabled or disabling; defaults false!
    */
-  boolean isTableDisabled(RegionInfo ri) {
-    if (ri == null) {
-      return false;
-    }
-    if (this.services == null) {
-      return false;
-    }
-    if (this.services.getTableStateManager() == null) {
+  private boolean isTableDisabled(RegionInfo ri) {
+    if (ri.isMetaRegion()) {
       return false;
     }
     TableState state = null;
@@ -282,8 +276,7 @@ class ReportMakingVisitor implements ClientMetaTableAccessor.CloseableVisitor {
     return this.previous == null || !this.previous.getTable().equals(ri.getTable());
   }
 
-  @Override
-  public void close() throws IOException {
+  public void done() {
     // This is a table transition... after the last region. Check previous.
     // Should be last region. If not, its a hole on end of laster table.
     if (this.previous != null && !this.previous.isLast()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplitMerge.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplitMerge.java
index 9eb263a..5e174f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplitMerge.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSimpleMetaSplitMerge.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.List;
@@ -31,6 +32,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -95,6 +97,20 @@ public class TestSimpleMetaSplitMerge {
     }
   }
 
+  private void compactMeta() throws IOException {
+    for (JVMClusterUtil.RegionServerThread t : UTIL.getMiniHBaseCluster()
+      .getRegionServerThreads()) {
+      for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
+        if (TableName.isMetaTableName(r.getRegionInfo().getTable())) {
+          r.compact(true);
+          for (HStore store : r.getStores()) {
+            store.closeAndArchiveCompactedFiles();
+          }
+        }
+      }
+    }
+  }
+
   @Test
   public void test() throws Exception {
     try (Table table = UTIL.getConnection().getTable(TD1.getTableName())) {
@@ -104,6 +120,8 @@ public class TestSimpleMetaSplitMerge {
       table.put(new Put(Bytes.toBytes("row2")).addColumn(CF, CQ, Bytes.toBytes("row2")));
     }
     Admin admin = UTIL.getAdmin();
+    // turn off catalog janitor
+    admin.catalogJanitorSwitch(false);
     // split meta
     admin.split(TableName.META_TABLE_NAME, Bytes.toBytes("b"));
     assertMetaRegionCount(2);
@@ -115,17 +133,7 @@ public class TestSimpleMetaSplitMerge {
     List<RegionInfo> regions = admin.getRegions(TableName.META_TABLE_NAME);
     assertEquals(2, regions.size());
     // compact to make sure we can merge
-    for (JVMClusterUtil.RegionServerThread t : UTIL.getMiniHBaseCluster()
-      .getRegionServerThreads()) {
-      for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
-        if (TableName.isMetaTableName(r.getRegionInfo().getTable())) {
-          r.compact(true);
-          for (HStore store : r.getStores()) {
-            store.closeAndArchiveCompactedFiles();
-          }
-        }
-      }
-    }
+    compactMeta();
     // merge the 2 regions back to 1
     admin.mergeRegionsAsync(regions.stream().map(RegionInfo::getRegionName).toArray(byte[][]::new),
       false).get();
@@ -137,5 +145,17 @@ public class TestSimpleMetaSplitMerge {
     // make sure that we could still get the locations from the new meta region
     assertValue(TD2.getTableName(), "row2");
     assertValue(TD1.getTableName(), "row1");
+
+    // make sure that catalog janitor can clean up the merged regions
+    RegionStateStore regionStateStore =
+      UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
+    RegionInfo mergedRegion = admin.getRegions(TableName.META_TABLE_NAME).get(0);
+    assertTrue(regionStateStore.hasMergeRegions(mergedRegion));
+    // compact to make sure we could clean the merged regions
+    compactMeta();
+    // one for merged region, one for split parent
+    assertEquals(2, admin.runCatalogJanitor());
+    // The GC procedure runs in the background, so we need to wait here; we cannot check directly.
+    UTIL.waitFor(30000, () -> !regionStateStore.hasMergeRegions(mergedRegion));
   }
 }

[hbase] 01/09: HBASE-24388 Store the locations of meta regions in master local store (#1746)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-24950
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 13b44d078a180ef2beaf34b60e1549e014bfac0a
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Jun 25 23:17:03 2020 +0800

    HBASE-24388 Store the locations of meta regions in master local store (#1746)
---
 .../apache/hadoop/hbase/CatalogFamilyFormat.java   |  17 +++
 .../org/apache/hadoop/hbase/master/HMaster.java    |  64 ++++++++-
 .../hbase/master/assignment/AssignmentManager.java |  82 +++++++-----
 .../hbase/master/assignment/RegionStateStore.java  | 143 ++++++++++++---------
 .../hbase/master/region/MasterRegionFactory.java   |   9 +-
 .../store/region/RegionProcedureStore.java         |   5 +-
 .../client/TestFailedMetaReplicaAssigment.java     |  11 +-
 .../hbase/master/TestCloseAnOpeningRegion.java     |   8 +-
 .../hbase/master/TestClusterRestartFailover.java   |  11 +-
 .../master/assignment/MockMasterServices.java      |  12 +-
 .../assignment/TestOpenRegionProcedureBackoff.java |  10 +-
 .../assignment/TestOpenRegionProcedureHang.java    |  10 +-
 .../assignment/TestRaceBetweenSCPAndDTP.java       |  10 +-
 .../assignment/TestRaceBetweenSCPAndTRSP.java      |  10 +-
 .../TestRegionAssignedToMultipleRegionServers.java |  10 +-
 .../assignment/TestReportOnlineRegionsRace.java    |  10 +-
 ...tReportRegionStateTransitionFromDeadServer.java |  10 +-
 .../TestReportRegionStateTransitionRetry.java      |  10 +-
 .../master/assignment/TestSCPGetRegionsRace.java   |  10 +-
 .../assignment/TestWakeUpUnexpectedProcedure.java  |  10 +-
 .../region/TestRegionProcedureStoreMigration.java  |   2 +-
 21 files changed, 309 insertions(+), 155 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
index 3cf6cc0..978198b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
@@ -28,6 +28,7 @@ import java.util.NavigableMap;
 import java.util.SortedMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -401,4 +403,19 @@ public class CatalogFamilyFormat {
     return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) &&
       PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX);
   }
+
+  public static Delete removeRegionReplica(byte[] metaRow, int replicaIndexToDeleteFrom,
+    int numReplicasToRemove) {
+    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
+    long now = EnvironmentEdgeManager.currentTime();
+    Delete deleteReplicaLocations = new Delete(metaRow);
+    for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
+      deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, getServerColumn(i), now);
+      deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, getSeqNumColumn(i), now);
+      deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, getStartCodeColumn(i), now);
+      deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, getServerNameColumn(i), now);
+      deleteReplicaLocations.addColumns(HConstants.CATALOG_FAMILY, getRegionStateColumn(i), now);
+    }
+    return deleteReplicaLocations;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 1791ce4..961c929 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -54,6 +54,11 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilderFactory;
+import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
@@ -80,9 +85,11 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.CompactionState;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
@@ -99,6 +106,7 @@ import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
 import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
+import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.master.balancer.BalancerChore;
@@ -179,6 +187,7 @@ import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -212,6 +221,7 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
 import org.apache.hadoop.hbase.zookeeper.SnapshotCleanupTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
@@ -393,7 +403,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
   private ProcedureStore procedureStore;
 
-  // the master local storage to store procedure data, etc.
+  // the master local storage to store procedure data, root table, etc.
   private MasterRegion masterRegion;
 
   // handle table states
@@ -758,8 +768,50 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   // Will be overriden in test to inject customized AssignmentManager
   @InterfaceAudience.Private
-  protected AssignmentManager createAssignmentManager(MasterServices master) {
-    return new AssignmentManager(master);
+  protected AssignmentManager createAssignmentManager(MasterServices master,
+    MasterRegion masterRegion) {
+    return new AssignmentManager(master, masterRegion);
+  }
+
+  private void tryMigrateRootTableFromZooKeeper() throws IOException, KeeperException {
+    // try migrate data from zookeeper
+    try (RegionScanner scanner =
+      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
+      List<Cell> cells = new ArrayList<>();
+      boolean moreRows = scanner.next(cells);
+      if (!cells.isEmpty() || moreRows) {
+        // notice that all replicas for a region are in the same row, so the migration can be
+        // done within a one row put, which means if we have data in root table then we can make
+        // sure that the migration is done.
+        LOG.info("Root table already has data in it, skip migrating...");
+        return;
+      }
+    }
+    // start migrating
+    byte[] row = CatalogFamilyFormat.getMetaKeyForRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
+    Put put = new Put(row);
+    List<String> metaReplicaNodes = zooKeeper.getMetaReplicaNodes();
+    StringBuilder info = new StringBuilder("Migrating meta location:");
+    for (String metaReplicaNode : metaReplicaNodes) {
+      int replicaId = zooKeeper.getZNodePaths().getMetaReplicaIdFromZNode(metaReplicaNode);
+      RegionState state = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
+      info.append(" ").append(state);
+      put.setTimestamp(state.getStamp());
+      MetaTableAccessor.addRegionInfo(put, state.getRegion());
+      if (state.getServerName() != null) {
+        MetaTableAccessor.addLocation(put, state.getServerName(), HConstants.NO_SEQNUM, replicaId);
+      }
+      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
+        .setFamily(HConstants.CATALOG_FAMILY)
+        .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp())
+        .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build());
+    }
+    if (!put.isEmpty()) {
+      LOG.info(info.toString());
+      masterRegion.update(r -> r.put(put));
+    } else {
+      LOG.info("No meta location available on zookeeper, skip migrating...");
+    }
   }
 
   /**
@@ -775,6 +827,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    * region server tracker
    * <ol type='i'>
    * <li>Create server manager</li>
+   * <li>Create root table</li>
    * <li>Create procedure executor, load the procedures, but do not start workers. We will start it
    * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same
    * server</li>
@@ -856,13 +909,16 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     // initialize master local region
     masterRegion = MasterRegionFactory.create(this);
+
+    tryMigrateRootTableFromZooKeeper();
+
     createProcedureExecutor();
     Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType =
       procedureExecutor.getActiveProceduresNoCopy().stream()
         .collect(Collectors.groupingBy(p -> p.getClass()));
 
     // Create Assignment Manager
-    this.assignmentManager = createAssignmentManager(this);
+    this.assignmentManager = createAssignmentManager(this, masterRegion);
     this.assignmentManager.start();
     // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
     // completed, it could still be in the procedure list. This is a bit strange but is another
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 5622a58..73b6aa6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -34,6 +34,7 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
@@ -67,11 +69,13 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
 import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.SequenceId;
 import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -79,8 +83,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -196,18 +198,21 @@ public class AssignmentManager {
   private final int assignMaxAttempts;
   private final int assignRetryImmediatelyMaxAttempts;
 
+  private final MasterRegion masterRegion;
+
   private final Object checkIfShouldMoveSystemRegionLock = new Object();
 
   private Thread assignThread;
 
-  public AssignmentManager(final MasterServices master) {
-    this(master, new RegionStateStore(master));
+  public AssignmentManager(MasterServices master, MasterRegion masterRegion) {
+    this(master, masterRegion, new RegionStateStore(master, masterRegion));
   }
 
-  AssignmentManager(final MasterServices master, final RegionStateStore stateStore) {
+  AssignmentManager(MasterServices master, MasterRegion masterRegion, RegionStateStore stateStore) {
     this.master = master;
     this.regionStateStore = stateStore;
     this.metrics = new MetricsAssignmentManager();
+    this.masterRegion = masterRegion;
 
     final Configuration conf = master.getConfiguration();
 
@@ -249,34 +254,47 @@ public class AssignmentManager {
 
     // Start the Assignment Thread
     startAssignmentThread();
+    // load meta region states.
+    try (RegionScanner scanner =
+      masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
+      List<Cell> cells = new ArrayList<>();
+      boolean moreRows;
+      do {
+        moreRows = scanner.next(cells);
+        if (cells.isEmpty()) {
+          continue;
+        }
+        Result result = Result.create(cells);
+        cells.clear();
+        RegionStateStore
+          .visitMetaEntry((r, regionInfo, state, regionLocation, lastHost, openSeqNum) -> {
+            RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionInfo);
+            regionNode.setState(state);
+            regionNode.setLastHost(lastHost);
+            regionNode.setRegionLocation(regionLocation);
+            regionNode.setOpenSeqNum(openSeqNum);
+            if (regionNode.getProcedure() != null) {
+              regionNode.getProcedure().stateLoaded(this, regionNode);
+            }
+            if (regionLocation != null) {
+              regionStates.addRegionToServer(regionNode);
+            }
+            if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
+              setMetaAssigned(regionInfo, state == State.OPEN);
+            }
 
-    // load meta region state
-    ZKWatcher zkw = master.getZooKeeper();
-    // it could be null in some tests
-    if (zkw == null) {
-      return;
-    }
-    List<String> metaZNodes = zkw.getMetaReplicaNodes();
-    LOG.debug("hbase:meta replica znodes: {}", metaZNodes);
-    for (String metaZNode : metaZNodes) {
-      int replicaId = zkw.getZNodePaths().getMetaReplicaIdFromZNode(metaZNode);
-      // here we are still in the early steps of active master startup. There is only one thread(us)
-      // can access AssignmentManager and create region node, so here we do not need to lock the
-      // region node.
-      RegionState regionState = MetaTableLocator.getMetaRegionState(zkw, replicaId);
-      RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionState.getRegion());
-      regionNode.setRegionLocation(regionState.getServerName());
-      regionNode.setState(regionState.getState());
-      if (regionNode.getProcedure() != null) {
-        regionNode.getProcedure().stateLoaded(this, regionNode);
-      }
-      if (regionState.getServerName() != null) {
-        regionStates.addRegionToServer(regionNode);
-      }
-      if (RegionReplicaUtil.isDefaultReplica(replicaId)) {
-        setMetaAssigned(regionState.getRegion(), regionState.getState() == State.OPEN);
-      }
-      LOG.debug("Loaded hbase:meta {}", regionNode);
+            if (regionInfo.isFirst()) {
+              // for compatibility, mirror the meta region state to zookeeper
+              try {
+                regionStateStore.mirrorMetaLocation(regionInfo, regionLocation, state);
+              } catch (IOException e) {
+                LOG.warn("Failed to mirror region location for {} to zk",
+                  regionNode.toShortString());
+              }
+            }
+            LOG.debug("Loaded hbase:meta {}", regionNode);
+          }, result);
+      } while (moreRows);
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 8818067..87c04da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -56,8 +56,10 @@ import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -92,10 +94,14 @@ public class RegionStateStore {
 
   private final MasterServices master;
 
-  public RegionStateStore(final MasterServices master) {
+  private final MasterRegion masterRegion;
+
+  public RegionStateStore(MasterServices master, MasterRegion masterRegion) {
     this.master = master;
+    this.masterRegion = masterRegion;
   }
 
+  @FunctionalInterface
   public interface RegionStateVisitor {
     void visitRegionState(Result result, RegionInfo regionInfo, State state,
       ServerName regionLocation, ServerName lastHost, long openSeqNum);
@@ -142,8 +148,8 @@ public class RegionStateStore {
     }
   }
 
-  private void visitMetaEntry(final RegionStateVisitor visitor, final Result result)
-    throws IOException {
+  public static void visitMetaEntry(final RegionStateVisitor visitor, final Result result)
+      throws IOException {
     final RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result);
     if (rl == null) return;
 
@@ -173,33 +179,14 @@ public class RegionStateStore {
   }
 
   void updateRegionLocation(RegionStateNode regionStateNode) throws IOException {
-    if (regionStateNode.getRegionInfo().isMetaRegion()) {
-      updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(),
-        regionStateNode.getState());
-    } else {
-      long openSeqNum = regionStateNode.getState() == State.OPEN ? regionStateNode.getOpenSeqNum() :
-        HConstants.NO_SEQNUM;
-      updateUserRegionLocation(regionStateNode.getRegionInfo(), regionStateNode.getState(),
-        regionStateNode.getRegionLocation(), openSeqNum,
-        // The regionStateNode may have no procedure in a test scenario; allow for this.
-        regionStateNode.getProcedure() != null ? regionStateNode.getProcedure().getProcId() :
-          Procedure.NO_PROC_ID);
-    }
-  }
-
-  private void updateMetaLocation(RegionInfo regionInfo, ServerName serverName, State state)
-    throws IOException {
-    try {
-      MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName, regionInfo.getReplicaId(),
-        state);
-    } catch (KeeperException e) {
-      throw new IOException(e);
-    }
-  }
-
-  private void updateUserRegionLocation(RegionInfo regionInfo, State state,
-    ServerName regionLocation, long openSeqNum, long pid) throws IOException {
     long time = EnvironmentEdgeManager.currentTime();
+    long openSeqNum = regionStateNode.getState() == State.OPEN ? regionStateNode.getOpenSeqNum() :
+      HConstants.NO_SEQNUM;
+    RegionInfo regionInfo = regionStateNode.getRegionInfo();
+    State state = regionStateNode.getState();
+    ServerName regionLocation = regionStateNode.getRegionLocation();
+    TransitRegionStateProcedure rit = regionStateNode.getProcedure();
+    long pid = rit != null ? rit.getProcId() : Procedure.NO_PROC_ID;
     final int replicaId = regionInfo.getReplicaId();
     final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time);
     MetaTableAccessor.addRegionInfo(put, regionInfo);
@@ -234,12 +221,32 @@ public class RegionStateStore {
       .build());
     LOG.info(info.toString());
     updateRegionLocation(regionInfo, state, put);
+    if (regionInfo.isMetaRegion() && regionInfo.isFirst()) {
+      // mirror the meta location to zookeeper
+      mirrorMetaLocation(regionInfo, regionLocation, state);
+    }
+  }
+
+  public void mirrorMetaLocation(RegionInfo regionInfo, ServerName serverName, State state)
+      throws IOException {
+    try {
+      MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName, regionInfo.getReplicaId(),
+        state);
+    } catch (KeeperException e) {
+      throw new IOException(e);
+    }
   }
 
   private void updateRegionLocation(RegionInfo regionInfo, State state, Put put)
     throws IOException {
-    try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
-      table.put(put);
+    try {
+      if (regionInfo.isMetaRegion()) {
+        masterRegion.update(r -> r.put(put));
+      } else {
+        try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
+          table.put(put);
+        }
+      }
     } catch (IOException e) {
       // TODO: Revist!!!! Means that if a server is loaded, then we will abort our host!
       // In tests we abort the Master!
@@ -541,45 +548,63 @@ public class RegionStateStore {
       .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
   }
 
+  private Delete deleteRegionReplicas(Result result, int oldReplicaCount, int newReplicaCount,
+    long now) {
+    RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result);
+    if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) {
+      return null;
+    }
+    Delete delete = new Delete(result.getRow());
+    for (int i = newReplicaCount; i < oldReplicaCount; i++) {
+      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i), now);
+      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i), now);
+      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i), now);
+      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i), now);
+      delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getRegionStateColumn(i),
+        now);
+    }
+    return delete;
+  }
+
   public void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
     throws IOException {
+    Scan scan = getScanForUpdateRegionReplicas(tableName);
+    List<Delete> deletes = new ArrayList<>();
+    long now = EnvironmentEdgeManager.currentTime();
     if (TableName.isMetaTableName(tableName)) {
-      ZKWatcher zk = master.getZooKeeper();
-      try {
-        for (int i = newReplicaCount; i < oldReplicaCount; i++) {
-          ZKUtil.deleteNode(zk, zk.getZNodePaths().getZNodeForReplica(i));
-        }
-      } catch (KeeperException e) {
-        throw new IOException(e);
+      try (RegionScanner scanner = masterRegion.getScanner(scan)) {
+        List<Cell> cells = new ArrayList<>();
+        boolean moreRows;
+        do {
+          cells.clear();
+          moreRows = scanner.next(cells);
+          if (cells.isEmpty()) {
+            continue;
+          }
+          Result result = Result.create(cells);
+          Delete delete = deleteRegionReplicas(result, oldReplicaCount, newReplicaCount, now);
+          if (delete != null) {
+            deletes.add(delete);
+          }
+        } while (moreRows);
       }
+      debugLogMutations(deletes);
+      masterRegion.update(r -> {
+        for (Delete d : deletes) {
+          r.delete(d);
+        }
+      });
     } else {
-      Scan scan = getScanForUpdateRegionReplicas(tableName);
-      List<Delete> deletes = new ArrayList<>();
-      long now = EnvironmentEdgeManager.currentTime();
       try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) {
         for (;;) {
           Result result = scanner.next();
           if (result == null) {
             break;
           }
-          RegionInfo primaryRegionInfo = CatalogFamilyFormat.getRegionInfo(result);
-          if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) {
-            continue;
-          }
-          Delete delete = new Delete(result.getRow());
-          for (int i = newReplicaCount; i < oldReplicaCount; i++) {
-            delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i),
-              now);
-            delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(i),
-              now);
-            delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i),
-              now);
-            delete.addColumns(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerNameColumn(i),
-              now);
-            delete.addColumns(HConstants.CATALOG_FAMILY,
-              CatalogFamilyFormat.getRegionStateColumn(i), now);
+          Delete delete = deleteRegionReplicas(result, oldReplicaCount, newReplicaCount, now);
+          if (delete != null) {
+            deletes.add(delete);
           }
-          deletes.add(delete);
         }
         debugLogMutations(deletes);
         metaTable.delete(deletes);
@@ -634,7 +659,7 @@ public class RegionStateStore {
     }
   }
 
-  private static byte[] getStateColumn(int replicaId) {
+  public static byte[] getStateColumn(int replicaId) {
     return replicaId == 0 ? HConstants.STATE_QUALIFIER :
       Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER +
         String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
index f1da308..cfa25f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.master.region;
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -80,6 +83,10 @@ public final class MasterRegionFactory {
   public static final byte[] PROC_FAMILY = Bytes.toBytes("proc");
 
   private static final TableDescriptor TABLE_DESC = TableDescriptorBuilder.newBuilder(TABLE_NAME)
+    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
+      .setMaxVersions(HConstants.DEFAULT_HBASE_META_VERSIONS).setInMemory(true)
+      .setBlocksize(HConstants.DEFAULT_HBASE_META_BLOCK_SIZE).setBloomFilterType(BloomType.ROWCOL)
+      .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).build())
     .setColumnFamily(ColumnFamilyDescriptorBuilder.of(PROC_FAMILY)).build();
 
   public static MasterRegion create(Server server) throws IOException {
@@ -100,7 +107,7 @@ public final class MasterRegionFactory {
     params.ringBufferSlotCount(conf.getInt(RING_BUFFER_SLOT_COUNT, DEFAULT_RING_BUFFER_SLOT_COUNT));
     long rollPeriodMs = conf.getLong(ROLL_PERIOD_MS_KEY, DEFAULT_ROLL_PERIOD_MS);
     params.rollPeriodMs(rollPeriodMs).archivedWalSuffix(ARCHIVED_WAL_SUFFIX)
-      .archivedHFileSuffix(ARCHIVED_HFILE_SUFFIX);
+      .archivedHFileSuffix(ARCHIVED_HFILE_SUFFIX).useMetaCellComparator(true);
     return MasterRegion.create(params);
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java
index 301b605..2d07dae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStore.java
@@ -442,8 +442,9 @@ public class RegionProcedureStore extends ProcedureStoreBase {
         Cell cell = cells.get(0);
         cells.clear();
         if (cell.getValueLength() == 0) {
-          region.update(r -> r
-            .delete(new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())));
+          region.update(
+            r -> r.delete(new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
+              .addFamily(PROC_FAMILY)));
         }
       }
     } catch (IOException e) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
index e336554..fde362c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
@@ -129,16 +130,18 @@ public class TestFailedMetaReplicaAssigment {
     }
 
     @Override
-    public AssignmentManager createAssignmentManager(MasterServices master) {
-      return new BrokenMasterMetaAssignmentManager(master);
+    public AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new BrokenMasterMetaAssignmentManager(master, masterRegion);
     }
   }
 
   public static class BrokenMasterMetaAssignmentManager extends AssignmentManager {
     MasterServices master;
 
-    public BrokenMasterMetaAssignmentManager(final MasterServices master) {
-      super(master);
+    public BrokenMasterMetaAssignmentManager(final MasterServices master,
+      MasterRegion masterRegion) {
+      super(master, masterRegion);
       this.master = master;
     }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java
index a79e8f3..73ff415 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -69,12 +70,13 @@ public class TestCloseAnOpeningRegion {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManager(master) {
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManager(master, masterRegion) {
 
         @Override
         public ReportRegionStateTransitionResponse reportRegionStateTransition(
-            ReportRegionStateTransitionRequest req) throws PleaseHoldException {
+          ReportRegionStateTransitionRequest req) throws PleaseHoldException {
           ReportRegionStateTransitionResponse resp = super.reportRegionStateTransition(req);
           TransitionCode code = req.getTransition(0).getTransitionCode();
           if (code == TransitionCode.OPENED && ARRIVE != null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java
index f9300ed..c224440 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java
@@ -27,7 +27,6 @@ import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.CountDownLatch;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompatibilityFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -40,6 +39,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.ServerState;
 import org.apache.hadoop.hbase.master.assignment.ServerStateNode;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -166,15 +166,16 @@ public class TestClusterRestartFailover extends AbstractTestRestartCluster {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
   }
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 445aeaf..d22040b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
+import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -85,6 +87,7 @@ public class MockMasterServices extends MockNoopMasterServices {
   private final SplitWALManager splitWALManager;
   private final AssignmentManager assignmentManager;
   private final TableStateManager tableStateManager;
+  private final MasterRegion masterRegion;
 
   private MasterProcedureEnv procedureEnv;
   private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
@@ -107,9 +110,10 @@ public class MockMasterServices extends MockNoopMasterServices {
     this.splitWALManager =
       conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)?
         null: new SplitWALManager(this);
-
+    this.masterRegion = MasterRegionFactory.create(this);
     // Mock an AM.
-    this.assignmentManager = new AssignmentManager(this, new MockRegionStateStore(this));
+    this.assignmentManager =
+      new AssignmentManager(this, masterRegion, new MockRegionStateStore(this, masterRegion));
     this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
     this.serverManager = new ServerManager(this);
     this.tableStateManager = Mockito.mock(TableStateManager.class);
@@ -290,8 +294,8 @@ public class MockMasterServices extends MockNoopMasterServices {
   }
 
   private static class MockRegionStateStore extends RegionStateStore {
-    public MockRegionStateStore(final MasterServices master) {
-      super(master);
+    public MockRegionStateStore(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
index 12ea426..4c0eac0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -55,8 +56,8 @@ public class TestOpenRegionProcedureBackoff {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -75,8 +76,9 @@ public class TestOpenRegionProcedureBackoff {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java
index 6fd4fb2..93711ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -74,8 +75,8 @@ public class TestOpenRegionProcedureHang {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -110,8 +111,9 @@ public class TestOpenRegionProcedureHang {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java
index cca5663..21fb63e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -69,8 +70,8 @@ public class TestRaceBetweenSCPAndDTP {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -95,8 +96,9 @@ public class TestRaceBetweenSCPAndDTP {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java
index 788df3f..ae21c75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -69,8 +70,8 @@ public class TestRaceBetweenSCPAndTRSP {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -108,8 +109,9 @@ public class TestRaceBetweenSCPAndTRSP {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
index da632d0..0cc510f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -80,8 +81,8 @@ public class TestRegionAssignedToMultipleRegionServers {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -114,8 +115,9 @@ public class TestRegionAssignedToMultipleRegionServers {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java
index b25b83d..f17c09d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -68,8 +69,8 @@ public class TestReportOnlineRegionsRace {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -110,8 +111,9 @@ public class TestReportOnlineRegionsRace {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
index 68d1e35..7fbf28b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -82,8 +83,8 @@ public class TestReportRegionStateTransitionFromDeadServer {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -121,8 +122,9 @@ public class TestReportRegionStateTransitionFromDeadServer {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java
index f493892..1aa0f34 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -60,8 +61,8 @@ public class TestReportRegionStateTransitionRetry {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -88,8 +89,9 @@ public class TestReportRegionStateTransitionRetry {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
index 8b85c2d..c4ad67c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -89,8 +90,8 @@ public class TestSCPGetRegionsRace {
 
   private static final class AssignmentManagerForTest extends AssignmentManager {
 
-    public AssignmentManagerForTest(MasterServices master) {
-      super(master);
+    public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -134,8 +135,9 @@ public class TestSCPGetRegionsRace {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AssignmentManagerForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AssignmentManagerForTest(master, masterRegion);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
index 2a19b0a..ee01223 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -135,8 +136,8 @@ public class TestWakeUpUnexpectedProcedure {
 
   private static final class AMForTest extends AssignmentManager {
 
-    public AMForTest(MasterServices master) {
-      super(master);
+    public AMForTest(MasterServices master, MasterRegion masterRegion) {
+      super(master, masterRegion);
     }
 
     @Override
@@ -202,8 +203,9 @@ public class TestWakeUpUnexpectedProcedure {
     }
 
     @Override
-    protected AssignmentManager createAssignmentManager(MasterServices master) {
-      return new AMForTest(master);
+    protected AssignmentManager createAssignmentManager(MasterServices master,
+      MasterRegion masterRegion) {
+      return new AMForTest(master, masterRegion);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
index 694ca32..cd39e99 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
@@ -80,7 +80,7 @@ public class TestRegionProcedureStoreMigration {
     Configuration conf = htu.getConfiguration();
     conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, false);
     // Runs on local filesystem. Test does not need sync. Turn off checks.
-    htu.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
+    conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
     Path testDir = htu.getDataTestDir();
     CommonFSUtils.setRootDir(conf, testDir);
     walStore = new WALProcedureStore(conf, new LeaseRecovery() {