Posted to commits@hbase.apache.org by zh...@apache.org on 2019/02/06 06:16:56 UTC

[hbase] branch HBASE-21512 updated (5d52262 -> 03cf5bd)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a change to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git.


 discard 5d52262  HBASE-21778 Remove the usage of the locateRegion related methods in ClusterConnection
 discard 457df3a  HBASE-21719 Rewrite RegionPlacementMaintainer to use AsyncClusterConnection
 discard 24e3722  HBASE-21537 Rewrite ServerManager.closeRegionSilentlyAndWait to use AsyncClusterConnection
 discard af8ea9b  HBASE-21671 Rewrite RegionReplicaReplicationEndpoint to use AsyncClusterConnection
 discard fc7aec9  HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection
 discard 57202f1  HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint
 discard aaff60a  HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin
 discard 630a191  HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager
 discard a36296d  HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer
     add 65caa08  HBASE-21812 Address ruby static analysis for shell/bin modules [2nd pass]
     add 8d75ac8  HBASE-21765 Website should point to lists.apache.org
     add d69c3e5  HBASE-21840 TestHRegionWithInMemoryFlush fails with NPE
     add 5f8bdd5  HBASE-21795 Client application may get stuck (time bound) if a table modify op is called immediately after split op
     add af92322  HBASE-19616 Review of LogCleaner Class
     add b24f19b  Adding xucang as a developer
     new 3e75b60  HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer
     new 75b670d  HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager
     new afd8f14  HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin
     new 8cb5a63  HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint
     new 8c3993a  HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection
     new 975cd99  HBASE-21671 Rewrite RegionReplicaReplicationEndpoint to use AsyncClusterConnection
     new 4d99866  HBASE-21537 Rewrite ServerManager.closeRegionSilentlyAndWait to use AsyncClusterConnection
     new d2ef2ad  HBASE-21719 Rewrite RegionPlacementMaintainer to use AsyncClusterConnection
     new 03cf5bd  HBASE-21778 Remove the usage of the locateRegion related methods in ClusterConnection

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (5d52262)
            \
             N -- N -- N   refs/heads/HBASE-21512 (03cf5bd)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 9 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 bin/draining_servers.rb                            |  22 +--
 bin/hirb.rb                                        |   4 +-
 bin/region_status.rb                               |   1 +
 bin/shutdown_regionserver.rb                       |  12 +-
 .../hbase/master/assignment/AssignmentManager.java |   4 +-
 .../hadoop/hbase/master/cleaner/LogCleaner.java    | 153 +++++++++------------
 .../regionserver/RegionServicesForStores.java      |  18 ++-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |  61 ++++++++
 .../hbase/master/cleaner/TestLogsCleaner.java      | 146 ++++++++++----------
 pom.xml                                            |  16 ++-
 10 files changed, 248 insertions(+), 189 deletions(-)


[hbase] 05/09: HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 8c3993a33d841de9eafda4ff5f6106e2abef22a2
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Wed Dec 12 09:33:33 2018 +0800

    HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection
---
 .../hbase/client/AsyncClusterConnection.java       |   8 ++
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |   8 ++
 .../hbase/client/ClusterConnectionFactory.java     |  16 +--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  36 ++++---
 .../master/procedure/RSProcedureDispatcher.java    |  34 +------
 .../hbase/protobuf/ReplicationProtbufUtil.java     |  15 +--
 .../hadoop/hbase/regionserver/HRegionServer.java   |   3 +-
 .../handler/RegionReplicaFlushHandler.java         | 110 ++++++++++++---------
 8 files changed, 110 insertions(+), 120 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index 1327fd7..f1f64ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+
 /**
  * The asynchronous connection for internal usage.
  */
@@ -41,4 +44,9 @@ public interface AsyncClusterConnection extends AsyncConnection {
    * Get the rpc client we used to communicate with other servers.
    */
   RpcClient getRpcClient();
+
+  /**
+   * Flush a region and get the response.
+   */
+  CompletableFuture<FlushRegionResponse> flush(byte[] regionName, boolean writeFlushWALMarker);
 }
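
For illustration, a minimal usage sketch of the new method; "server" and
"regionName" are assumed to be in scope, and only the flush signature itself
comes from this patch. The response accessors are the ones
RegionReplicaFlushHandler checks later in this series.

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;

    // Hypothetical internal caller: trigger a flush on the primary replica
    // and react to the protobuf response.
    AsyncClusterConnection conn = server.getAsyncClusterConnection();
    CompletableFuture<FlushRegionResponse> future = conn.flush(regionName, true);
    future.thenAccept(resp -> {
      if (resp.getFlushed()) {
        // a real flush was started on the primary
      } else if (resp.hasWroteFlushWalMarker() && resp.getWroteFlushWalMarker()) {
        // memstore was empty; only a flush WAL marker was written
      }
    });
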
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 8cb19a6..0808e90 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -55,6 +55,7 @@ import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
@@ -363,4 +364,11 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
   public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
     return new AsyncRegionServerAdmin(serverName, this);
   }
+
+  @Override
+  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
+      boolean writeFlushWALMarker) {
+    RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin();
+    return admin.flushRegionInternal(regionName, writeFlushWALMarker);
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
index 68c0630..79484db 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -18,15 +18,12 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.SocketAddress;
-import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-
 /**
  * The factory for creating {@link AsyncClusterConnection}.
  */
@@ -48,16 +45,7 @@ public final class ClusterConnectionFactory {
   public static AsyncClusterConnection createAsyncClusterConnection(Configuration conf,
       SocketAddress localAddress, User user) throws IOException {
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(conf);
-    String clusterId;
-    try {
-      clusterId = registry.getClusterId().get();
-    } catch (InterruptedException e) {
-      throw (IOException) new InterruptedIOException().initCause(e);
-    } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      Throwables.propagateIfPossible(cause, IOException.class);
-      throw new IOException(cause);
-    }
+    String clusterId = FutureUtils.get(registry.getClusterId());
     return new AsyncConnectionImpl(conf, registry, clusterId, localAddress, user);
   }
 }
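
The try/catch boilerplate deleted above is exactly what FutureUtils.get
centralizes. A sketch reconstructed from those deleted lines; the real
FutureUtils in HBase may differ in details:

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import org.apache.hbase.thirdparty.com.google.common.base.Throwables;

    // Wait on a future, translating the checked exceptions of Future.get()
    // into IOException, as the deleted inline code did.
    public static <T> T get(Future<T> future) throws IOException {
      try {
        return future.get();
      } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException().initCause(e);
      } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        Throwables.propagateIfPossible(cause, IOException.class);
        throw new IOException(cause);
      }
    }
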
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index d4b60fb..d06bfd4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -851,7 +851,19 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<Void> flushRegion(byte[] regionName) {
-    CompletableFuture<Void> future = new CompletableFuture<>();
+    return flushRegionInternal(regionName, false).thenAccept(r -> {
+    });
+  }
+
+  /**
+   * This method is for internal use only, where we need the response of the flush.
+   * <p/>
+   * As it exposes the protobuf message, please do <strong>NOT</strong> try to expose it as a public
+   * API.
+   */
+  CompletableFuture<FlushRegionResponse> flushRegionInternal(byte[] regionName,
+      boolean writeFlushWALMarker) {
+    CompletableFuture<FlushRegionResponse> future = new CompletableFuture<>();
     addListener(getRegionLocation(regionName), (location, err) -> {
       if (err != null) {
         future.completeExceptionally(err);
@@ -863,7 +875,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
           .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
         return;
       }
-      addListener(flush(serverName, location.getRegion()), (ret, err2) -> {
+      addListener(flush(serverName, location.getRegion(), writeFlushWALMarker), (ret, err2) -> {
         if (err2 != null) {
           future.completeExceptionally(err2);
         } else {
@@ -874,15 +886,14 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
     return future;
   }
 
-  private CompletableFuture<Void> flush(final ServerName serverName, final RegionInfo regionInfo) {
-    return this.<Void> newAdminCaller()
-            .serverName(serverName)
-            .action(
-              (controller, stub) -> this.<FlushRegionRequest, FlushRegionResponse, Void> adminCall(
-                controller, stub, RequestConverter.buildFlushRegionRequest(regionInfo
-                  .getRegionName()), (s, c, req, done) -> s.flushRegion(c, req, done),
-                resp -> null))
-            .call();
+  private CompletableFuture<FlushRegionResponse> flush(ServerName serverName, RegionInfo regionInfo,
+      boolean writeFlushWALMarker) {
+    return this.<FlushRegionResponse> newAdminCaller().serverName(serverName)
+      .action((controller, stub) -> this
+        .<FlushRegionRequest, FlushRegionResponse, FlushRegionResponse> adminCall(controller, stub,
+          RequestConverter.buildFlushRegionRequest(regionInfo.getRegionName(), writeFlushWALMarker),
+          (s, c, req, done) -> s.flushRegion(c, req, done), resp -> resp))
+      .call();
   }
 
   @Override
@@ -895,7 +906,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       }
       List<CompletableFuture<Void>> compactFutures = new ArrayList<>();
       if (hRegionInfos != null) {
-        hRegionInfos.forEach(region -> compactFutures.add(flush(sn, region)));
+        hRegionInfos.forEach(region -> compactFutures.add(flush(sn, region, false).thenAccept(r -> {
+        })));
       }
       addListener(CompletableFuture.allOf(
         compactFutures.toArray(new CompletableFuture<?>[compactFutures.size()])), (ret, err2) -> {
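
Note the thenAccept(r -> { }) in both hunks above: it adapts the internal
CompletableFuture<FlushRegionResponse> to the CompletableFuture<Void> that the
public AsyncAdmin contract promises, discarding the response while still
propagating failures. The same adaptation in isolation (a generic sketch, not
HBase-specific):

    import java.util.concurrent.CompletableFuture;

    // Drop a future's result but keep its completion semantics: if "f"
    // completes exceptionally, so does the returned future.
    static <T> CompletableFuture<Void> dropResult(CompletableFuture<T> f) {
      return f.thenAccept(r -> {
      });
    }
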
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index f3ab4b3..f772b68 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -18,11 +18,9 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
@@ -35,12 +33,12 @@ import org.apache.hadoop.hbase.master.ServerListener;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
@@ -341,15 +339,7 @@ public class RSProcedureDispatcher
 
     protected ExecuteProceduresResponse sendRequest(final ServerName serverName,
         final ExecuteProceduresRequest request) throws IOException {
-      try {
-        return getRsAdmin().executeProcedures(request).get();
-      } catch (InterruptedException e) {
-        throw (IOException) new InterruptedIOException().initCause(e);
-      } catch (ExecutionException e) {
-        Throwable cause = e.getCause();
-        Throwables.propagateIfPossible(cause, IOException.class);
-        throw new IOException(cause);
-      }
+      return FutureUtils.get(getRsAdmin().executeProcedures(request));
     }
 
     protected void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {
@@ -408,15 +398,7 @@ public class RSProcedureDispatcher
 
     private OpenRegionResponse sendRequest(final ServerName serverName,
         final OpenRegionRequest request) throws IOException {
-      try {
-        return getRsAdmin().openRegion(request).get();
-      } catch (InterruptedException e) {
-        throw (IOException) new InterruptedIOException().initCause(e);
-      } catch (ExecutionException e) {
-        Throwable cause = e.getCause();
-        Throwables.propagateIfPossible(cause, IOException.class);
-        throw new IOException(cause);
-      }
+      return FutureUtils.get(getRsAdmin().openRegion(request));
     }
 
     private void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {
@@ -458,15 +440,7 @@ public class RSProcedureDispatcher
 
     private CloseRegionResponse sendRequest(final ServerName serverName,
         final CloseRegionRequest request) throws IOException {
-      try {
-        return getRsAdmin().closeRegion(request).get();
-      } catch (InterruptedException e) {
-        throw (IOException) new InterruptedIOException().initCause(e);
-      } catch (ExecutionException e) {
-        Throwable cause = e.getCause();
-        Throwables.propagateIfPossible(cause, IOException.class);
-        throw new IOException(cause);
-      }
+      return FutureUtils.get(getRsAdmin().closeRegion(request));
     }
 
     private void remoteCallCompleted(final MasterProcedureEnv env,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 74fad26..9f41a76 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -18,13 +18,10 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
-
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
@@ -32,12 +29,12 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
@@ -60,15 +57,7 @@ public class ReplicationProtbufUtil {
       throws IOException {
     Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(
       entries, null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
-    try {
-      admin.replicateWALEntry(p.getFirst(), p.getSecond()).get();
-    } catch (InterruptedException e) {
-      throw (IOException) new InterruptedIOException().initCause(e);
-    } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      Throwables.propagateIfPossible(cause, IOException.class);
-      throw new IOException(e);
-    }
+    FutureUtils.get(admin.replicateWALEntry(p.getFirst(), p.getSecond()));
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2e54907..880525d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2390,8 +2390,7 @@ public class HRegionServer extends HasThread implements
 
     // submit it to be handled by one of the handlers so that we do not block OpenRegionHandler
     if (this.executorService != null) {
-      this.executorService.submit(new RegionReplicaFlushHandler(this, clusterConnection,
-          rpcRetryingCallerFactory, rpcControllerFactory, operationTimeout, region));
+      this.executorService.submit(new RegionReplicaFlushHandler(this, region));
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index 81b6d7e..0729203 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -20,26 +20,23 @@ package org.apache.hadoop.hbase.regionserver.handler;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.FlushRegionCallable;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 
 /**
  * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to wal in
@@ -56,20 +53,13 @@ public class RegionReplicaFlushHandler extends EventHandler {
 
   private static final Logger LOG = LoggerFactory.getLogger(RegionReplicaFlushHandler.class);
 
-  private final ClusterConnection connection;
-  private final RpcRetryingCallerFactory rpcRetryingCallerFactory;
-  private final RpcControllerFactory rpcControllerFactory;
-  private final int operationTimeout;
+  private final AsyncClusterConnection connection;
+
   private final HRegion region;
 
-  public RegionReplicaFlushHandler(Server server, ClusterConnection connection,
-      RpcRetryingCallerFactory rpcRetryingCallerFactory, RpcControllerFactory rpcControllerFactory,
-      int operationTimeout, HRegion region) {
+  public RegionReplicaFlushHandler(Server server, HRegion region) {
     super(server, EventType.RS_REGION_REPLICA_FLUSH);
-    this.connection = connection;
-    this.rpcRetryingCallerFactory = rpcRetryingCallerFactory;
-    this.rpcControllerFactory = rpcControllerFactory;
-    this.operationTimeout = operationTimeout;
+    this.connection = server.getAsyncClusterConnection();
     this.region = region;
   }
 
@@ -103,7 +93,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
     return numRetries;
   }
 
-  void triggerFlushInPrimaryRegion(final HRegion region) throws IOException, RuntimeException {
+  void triggerFlushInPrimaryRegion(final HRegion region) throws IOException {
     long pause = connection.getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
 
@@ -117,45 +107,59 @@ public class RegionReplicaFlushHandler extends EventHandler {
     }
     while (!region.isClosing() && !region.isClosed()
         && !server.isAborted() && !server.isStopped()) {
-      FlushRegionCallable flushCallable = new FlushRegionCallable(
-        connection, rpcControllerFactory,
-        RegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()), true);
-
       // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we
       // do not have to wait for the whole flush here, just initiate it.
-      FlushRegionResponse response = null;
+      FlushRegionResponse response;
       try {
-         response = rpcRetryingCallerFactory.<FlushRegionResponse>newCaller()
-          .callWithRetries(flushCallable, this.operationTimeout);
-      } catch (IOException ex) {
-        if (ex instanceof TableNotFoundException
-            || connection.isTableDisabled(region.getRegionInfo().getTable())) {
+        response = FutureUtils.get(connection.flush(ServerRegionReplicaUtil
+          .getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionName(), true));
+      } catch (IOException e) {
+        if (e instanceof TableNotFoundException || FutureUtils
+          .get(connection.getAdmin().isTableDisabled(region.getRegionInfo().getTable()))) {
           return;
         }
-        throw ex;
+        if (!counter.shouldRetry()) {
+          throw e;
+        }
+        // We need to retry here because the retry logic for an asynchronous admin request is
+        // much simpler than that for a normal operation: if we fail to locate the region once,
+        // we throw the exception out and do not try to relocate again. So we add some retries
+        // of our own here to avoid shutting down the region server too
+        // frequently.
+        LOG.debug("Failed to trigger a flush of primary region replica {} of region {}, retry={}",
+          ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
+            .getRegionNameAsString(),
+          region.getRegionInfo().getRegionNameAsString(), counter.getAttemptTimes(), e);
+        try {
+          counter.sleepUntilNextRetry();
+        } catch (InterruptedException e1) {
+          throw new InterruptedIOException(e1.getMessage());
+        }
+        continue;
       }
 
       if (response.getFlushed()) {
         // then we have to wait for seeing the flush entry. All reads will be rejected until we see
         // a complete flush cycle or replay a region open event
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Successfully triggered a flush of primary region replica "
-              + ServerRegionReplicaUtil
-                .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName()
-                + " of region " + region.getRegionInfo().getEncodedName()
-                + " Now waiting and blocking reads until observing a full flush cycle");
+          LOG.debug("Successfully triggered a flush of primary region replica " +
+            ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
+              .getRegionNameAsString() +
+            " of region " + region.getRegionInfo().getRegionNameAsString() +
+            " Now waiting and blocking reads until observing a full flush cycle");
         }
         region.setReadsEnabled(true);
         break;
       } else {
         if (response.hasWroteFlushWalMarker()) {
-          if(response.getWroteFlushWalMarker()) {
+          if (response.getWroteFlushWalMarker()) {
             if (LOG.isDebugEnabled()) {
-              LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary "
-                  + "region replica " + ServerRegionReplicaUtil
-                    .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName()
-                  + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
-                  + "blocking reads until observing a flush marker");
+              LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary " +
+                "region replica " +
+                ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
+                  .getRegionNameAsString() +
+                " of region " + region.getRegionInfo().getRegionNameAsString() +
+                " Now waiting and " + "blocking reads until observing a flush marker");
             }
             region.setReadsEnabled(true);
             break;
@@ -164,15 +168,23 @@ public class RegionReplicaFlushHandler extends EventHandler {
             // closing or already flushing. Retry flush again after some sleep.
             if (!counter.shouldRetry()) {
               throw new IOException("Cannot cause primary to flush or drop a wal marker after " +
-                  "retries. Failing opening of this region replica "
-                  + region.getRegionInfo().getEncodedName());
+                counter.getAttemptTimes() + " retries. Failing opening of this region replica " +
+                region.getRegionInfo().getRegionNameAsString());
+            } else {
+              LOG.warn(
+                "Cannot cause primary replica {} to flush or drop a wal marker " +
+                  "for region replica {}, retry={}",
+                ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
+                  .getRegionNameAsString(),
+                region.getRegionInfo().getRegionNameAsString(), counter.getAttemptTimes());
             }
           }
         } else {
           // nothing to do. Are we dealing with an old server?
-          LOG.warn("Was not able to trigger a flush from primary region due to old server version? "
-              + "Continuing to open the secondary region replica: "
-              + region.getRegionInfo().getEncodedName());
+          LOG.warn(
+            "Was not able to trigger a flush from primary region due to old server version? " +
+              "Continuing to open the secondary region replica: " +
+              region.getRegionInfo().getRegionNameAsString());
           region.setReadsEnabled(true);
           break;
         }
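
Condensed, the new retry loop follows a standard pattern: issue the flush,
and on failure back off via RetryCounter until the attempts run out. A sketch
under the assumption that RetryCounterFactory takes (maxAttempts,
sleepIntervalMs) and that "conn" and "regionName" are supplied by the caller:

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import org.apache.hadoop.hbase.client.AsyncClusterConnection;
    import org.apache.hadoop.hbase.util.FutureUtils;
    import org.apache.hadoop.hbase.util.RetryCounter;
    import org.apache.hadoop.hbase.util.RetryCounterFactory;

    void flushWithRetries(AsyncClusterConnection conn, byte[] regionName,
        int maxAttempts, int sleepIntervalMs) throws IOException {
      RetryCounter counter = new RetryCounterFactory(maxAttempts, sleepIntervalMs).create();
      while (true) {
        try {
          // blocks until the flush RPC completes; failures become IOException
          FutureUtils.get(conn.flush(regionName, true));
          return;
        } catch (IOException e) {
          if (!counter.shouldRetry()) {
            throw e; // out of attempts, surface the failure
          }
          try {
            counter.sleepUntilNextRetry(); // back off before the next attempt
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(ie.getMessage());
          }
        }
      }
    }
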


[hbase] 09/09: HBASE-21778 Remove the usage of the locateRegion related methods in ClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 03cf5bd5d7859f040403dbb6112c517bc21590e9
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Fri Feb 1 16:40:34 2019 +0800

    HBASE-21778 Remove the usage of the locateRegion related methods in ClusterConnection
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../apache/hadoop/hbase/client/AsyncProcess.java   |   4 +-
 .../hadoop/hbase/client/BufferedMutatorImpl.java   |   3 +-
 .../hbase/client/ClientAsyncPrefetchScanner.java   |   4 +-
 .../apache/hadoop/hbase/client/ClientScanner.java  |   6 +-
 .../hadoop/hbase/client/ClientServiceCallable.java |   4 +-
 .../hadoop/hbase/client/ClientSimpleScanner.java   |   4 +-
 .../hadoop/hbase/client/ClusterConnection.java     | 149 ---------------------
 .../hbase/client/ConnectionImplementation.java     | 127 ++++++++++++------
 .../hadoop/hbase/client/FlushRegionCallable.java   |   6 +-
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |  10 +-
 .../org/apache/hadoop/hbase/client/HTable.java     |   4 +-
 .../hbase/client/RegionAdminServiceCallable.java   |  12 +-
 .../hadoop/hbase/client/RegionServerCallable.java  |  10 +-
 .../hadoop/hbase/client/ReversedClientScanner.java |   4 +-
 .../client/RpcRetryingCallerWithReadReplicas.java  |  36 +++--
 .../hbase/client/ScannerCallableWithReplicas.java  |  27 ++--
 .../hadoop/hbase/client/TestAsyncProcess.java      |  98 +++++++-------
 .../TestAsyncProcessWithRegionException.java       |  10 +-
 .../hadoop/hbase/client/TestBufferedMutator.java   |   2 +-
 .../hadoop/hbase/client/TestClientScanner.java     |   8 +-
 .../hbase/client/TestReversedScannerCallable.java  |   3 +-
 ...nTestTimeBoundedRequestsWithRegionReplicas.java |  13 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |  34 ++---
 .../apache/hadoop/hbase/util/RegionSplitter.java   |  19 ++-
 .../hbase/client/HConnectionTestingUtility.java    | 105 +++++++--------
 .../apache/hadoop/hbase/client/TestCISleep.java    |   4 +-
 .../hbase/client/TestHBaseAdminNoCluster.java      |   7 +-
 .../TestMetaTableAccessorNoCluster.java            |  47 ++++---
 .../hadoop/hbase/client/TestMetaWithReplicas.java  |  34 ++---
 .../hbase/client/TestReplicaWithCluster.java       |  49 ++++---
 .../hadoop/hbase/client/TestReplicasClient.java    |  11 +-
 .../hbase/client/TestSeparateClientZKCluster.java  |   4 +-
 .../client/TestSnapshotCloneIndependence.java      |   2 +-
 .../master/TestMetaAssignmentWithStopMaster.java   |  48 +++----
 .../TestLoadIncrementalHFilesSplitRecovery.java    |  27 ++--
 .../hadoop/hbase/util/BaseTestHBaseFsck.java       |   3 +-
 .../hadoop/hbase/util/MultiThreadedAction.java     |  23 ++--
 .../hadoop/hbase/util/MultiThreadedReader.java     |  11 +-
 .../hadoop/hbase/util/MultiThreadedWriterBase.java |   8 +-
 39 files changed, 431 insertions(+), 549 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index de7449b..b0f863f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -136,7 +136,7 @@ class AsyncProcess {
   // TODO: many of the fields should be made private
   final long id;
 
-  final ClusterConnection connection;
+  final ConnectionImplementation connection;
   private final RpcRetryingCallerFactory rpcCallerFactory;
   final RpcControllerFactory rpcFactory;
 
@@ -161,7 +161,7 @@ class AsyncProcess {
   public static final String LOG_DETAILS_PERIOD = "hbase.client.log.detail.period.ms";
   private static final int DEFAULT_LOG_DETAILS_PERIOD = 10000;
   private final int periodToLog;
-  AsyncProcess(ClusterConnection hc, Configuration conf,
+  AsyncProcess(ConnectionImplementation hc, Configuration conf,
       RpcRetryingCallerFactory rpcCaller, RpcControllerFactory rpcFactory) {
     if (hc == null) {
       throw new IllegalArgumentException("ClusterConnection cannot be null.");
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index d4bc811..9740ce2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -140,7 +140,8 @@ public class BufferedMutatorImpl implements BufferedMutator {
             params.getOperationTimeout() : conn.getConnectionConfiguration().getOperationTimeout());
     this.ap = ap;
   }
-  BufferedMutatorImpl(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory,
+
+  BufferedMutatorImpl(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory,
       RpcControllerFactory rpcFactory, BufferedMutatorParams params) {
     this(conn, params,
       // puts need to track errors globally due to how the APIs currently work.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
index e5af871..1a9178c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
@@ -67,11 +67,11 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
   private final Condition notFull = lock.newCondition();
 
   public ClientAsyncPrefetchScanner(Configuration configuration, Scan scan, TableName name,
-      ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory,
       RpcControllerFactory rpcControllerFactory, ExecutorService pool,
       int replicaCallTimeoutMicroSecondScan) throws IOException {
     super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool,
-        replicaCallTimeoutMicroSecondScan);
+      replicaCallTimeoutMicroSecondScan);
   }
 
   @VisibleForTesting
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 3dbe5f4..ee95a24 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -71,7 +71,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
   // Keep lastResult returned successfully in case we have to reset scanner.
   protected Result lastResult = null;
   protected final long maxScannerResultSize;
-  private final ClusterConnection connection;
+  private final ConnectionImplementation connection;
   protected final TableName tableName;
   protected final int scannerTimeout;
   protected boolean scanMetricsPublished = false;
@@ -97,7 +97,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
    * @throws IOException
    */
   public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
-      ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory,
       RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
       throws IOException {
     if (LOG.isTraceEnabled()) {
@@ -141,7 +141,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
     initCache();
   }
 
-  protected ClusterConnection getConnection() {
+  protected ConnectionImplementation getConnection() {
     return this.connection;
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
index f118e7a..67ba838 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
@@ -34,9 +34,9 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 public abstract class ClientServiceCallable<T> extends
     RegionServerCallable<T, ClientProtos.ClientService.BlockingInterface> {
 
-  public ClientServiceCallable(Connection connection, TableName tableName, byte [] row,
+  public ClientServiceCallable(Connection connection, TableName tableName, byte[] row,
       RpcController rpcController, int priority) {
-    super(connection, tableName, row, rpcController, priority);
+    super((ConnectionImplementation) connection, tableName, row, rpcController, priority);
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
index 7e9c4b9..e5d7b97 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
@@ -37,11 +37,11 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 @InterfaceAudience.Private
 public class ClientSimpleScanner extends ClientScanner {
   public ClientSimpleScanner(Configuration configuration, Scan scan, TableName name,
-      ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory,
       RpcControllerFactory rpcControllerFactory, ExecutorService pool,
       int replicaCallTimeoutMicroSecondScan) throws IOException {
     super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool,
-        replicaCallTimeoutMicroSecondScan);
+      replicaCallTimeoutMicroSecondScan);
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 9b2222b..7828ef0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -20,11 +20,8 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.util.List;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@@ -95,135 +92,6 @@ public interface ClusterConnection extends Connection {
   TableState getTableState(TableName tableName)  throws IOException;
 
   /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @return HRegionLocation that describes where to find the region in
-   *   question
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation locateRegion(final TableName tableName,
-      final byte [] row) throws IOException;
-
-  /**
-   * @deprecated {@link #clearRegionLocationCache()} instead.
-   */
-  @Deprecated
-  default void clearRegionCache() {
-    clearRegionLocationCache();
-  }
-
-  void cacheLocation(final TableName tableName, final RegionLocations location);
-
-  /**
-   * Allows flushing the region cache of all locations that pertain to
-   * <code>tableName</code>
-   * @param tableName Name of the table whose regions we are to remove from
-   *   cache.
-   */
-  void clearRegionCache(final TableName tableName);
-
-  /**
-   * Deletes cached locations for the specific region.
-   * @param location The location object for the region, to be purged from cache.
-   */
-  void deleteCachedRegionLocation(final HRegionLocation location);
-
-  /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in, ignoring any value that might be in the cache.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @return HRegionLocation that describes where to find the region in
-   *   question
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation relocateRegion(final TableName tableName,
-      final byte [] row) throws IOException;
-
-  /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in, ignoring any value that might be in the cache.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @param replicaId the replicaId of the region
-   * @return RegionLocations that describe where to find the region in
-   *   question
-   * @throws IOException if a remote or network exception occurs
-   */
-  RegionLocations relocateRegion(final TableName tableName,
-      final byte [] row, int replicaId) throws IOException;
-
-  /**
-   * Update the location cache. This is used internally by HBase, in most cases it should not be
-   *  used by the client application.
-   * @param tableName the table name
-   * @param regionName the region name
-   * @param rowkey the row
-   * @param exception the exception if any. Can be null.
-   * @param source the previous location
-   */
-  void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey,
-                                    Object exception, ServerName source);
-
-  /**
-   * Gets the location of the region of <i>regionName</i>.
-   * @param regionName name of the region to locate
-   * @return HRegionLocation that describes where to find the region in
-   *   question
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation locateRegion(final byte[] regionName)
-  throws IOException;
-
-  /**
-   * Gets the locations of all regions in the specified table, <i>tableName</i>.
-   * @param tableName table to get regions of
-   * @return list of region locations for all regions of table
-   * @throws IOException if IO failure occurs
-   */
-  List<HRegionLocation> locateRegions(final TableName tableName) throws IOException;
-
-  /**
-   * Gets the locations of all regions in the specified table, <i>tableName</i>.
-   * @param tableName table to get regions of
-   * @param useCache Should we use the cache to retrieve the region information.
-   * @param offlined True if we are to include offlined regions, false and we'll leave out offlined
-   *          regions from returned list.
-   * @return list of region locations for all regions of table
-   * @throws IOException if IO failure occurs
-   */
-  List<HRegionLocation> locateRegions(final TableName tableName,
-      final boolean useCache,
-      final boolean offlined) throws IOException;
-
-  /**
-   *
-   * @param tableName table to get regions of
-   * @param row the row
-   * @param useCache Should we use the cache to retrieve the region information.
-   * @param retry do we retry
-   * @return region locations for this row.
-   * @throws IOException if IO failure occurs
-   */
-  RegionLocations locateRegion(TableName tableName,
-                               byte[] row, boolean useCache, boolean retry) throws IOException;
-
- /**
-  *
-  * @param tableName table to get regions of
-  * @param row the row
-  * @param useCache Should we use the cache to retrieve the region information.
-  * @param retry do we retry
-  * @param replicaId the replicaId for the region
-  * @return region locations for this row.
-  * @throws IOException if IO failure occurs
-  */
-  RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry,
-     int replicaId) throws IOException;
-
-  /**
    * Returns a {@link MasterKeepAliveConnection} to the active master
    */
   MasterKeepAliveConnection getMaster() throws IOException;
@@ -253,23 +121,6 @@ public interface ClusterConnection extends Connection {
   ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException;
 
   /**
-   * Find region location hosting passed row
-   * @param tableName table name
-   * @param row Row to find.
-   * @param reload If true do not use cache, otherwise bypass.
-   * @return Location of row.
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reload)
-      throws IOException;
-
-  /**
-   * Clear any caches that pertain to server name <code>sn</code>.
-   * @param sn A server name
-   */
-  void clearCaches(final ServerName sn);
-
-  /**
    * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration.
    */
   NonceGenerator getNonceGenerator();
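
Since the locate methods above now exist only as package-private methods on
ConnectionImplementation, code inside org.apache.hadoop.hbase.client that
still needs them casts down, as ClientServiceCallable does earlier in this
patch. A minimal sketch of that interim pattern; "connection", "tableName"
and "row" are assumed to be in scope:

    // Works only for callers in the org.apache.hadoop.hbase.client package,
    // because locateRegion is now package-private on the implementation.
    ConnectionImplementation connImpl = (ConnectionImplementation) connection;
    HRegionLocation loc = connImpl.locateRegion(tableName, row);
    if (loc != null) {
      ServerName server = loc.getServerName();
      // ... dispatch the admin call to "server"
    }
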
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 5df3c07..c72d97d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -609,9 +609,16 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return true;
   }
 
-  @Override
-  public HRegionLocation getRegionLocation(final TableName tableName, final byte[] row,
-      boolean reload) throws IOException {
+  /**
+   * Find region location hosting passed row
+   * @param tableName table name
+   * @param row Row to find.
+   * @param reload If true do not use cache, otherwise bypass.
+   * @return Location of row.
+   * @throws IOException if a remote or network exception occurs
+   */
+  HRegionLocation getRegionLocation(final TableName tableName, final byte[] row, boolean reload)
+      throws IOException {
     return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row);
   }
 
@@ -681,13 +688,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }
   }
 
-  @Override
-  public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
-    RegionLocations locations = locateRegion(RegionInfo.getTable(regionName),
-      RegionInfo.getStartKey(regionName), false, true);
-    return locations == null ? null : locations.getRegionLocation();
-  }
-
   private boolean isDeadServer(ServerName sn) {
     if (clusterStatusListener == null) {
       return false;
@@ -696,13 +696,26 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }
   }
 
-  @Override
-  public List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
+  /**
+   * Gets the locations of all regions in the specified table, <i>tableName</i>.
+   * @param tableName table to get regions of
+   * @return list of region locations for all regions of table
+   * @throws IOException if IO failure occurs
+   */
+  List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
     return locateRegions(tableName, false, true);
   }
 
-  @Override
-  public List<HRegionLocation> locateRegions(TableName tableName, boolean useCache,
+  /**
+   * Gets the locations of all regions in the specified table, <i>tableName</i>.
+   * @param tableName table to get regions of
+   * @param useCache Should we use the cache to retrieve the region information.
+   * @param offlined True if we are to include offlined regions, false and we'll leave out offlined
+   *          regions from returned list.
+   * @return list of region locations for all regions of table
+   * @throws IOException if IO failure occurs
+   */
+  List<HRegionLocation> locateRegions(TableName tableName, boolean useCache,
       boolean offlined) throws IOException {
     List<RegionInfo> regions;
     if (TableName.isMetaTableName(tableName)) {
@@ -727,24 +740,44 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return locations;
   }
 
-  @Override
-  public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
-      throws IOException {
+  /**
+   * Find the location of the region of <i>tableName</i> that <i>row</i> lives in.
+   * @param tableName name of the table <i>row</i> is in
+   * @param row row key you're trying to find the region of
+   * @return HRegionLocation that describes where to find the region in question
+   * @throws IOException if a remote or network exception occurs
+   */
+  HRegionLocation locateRegion(final TableName tableName, final byte[] row) throws IOException {
     RegionLocations locations = locateRegion(tableName, row, true, true);
     return locations == null ? null : locations.getRegionLocation();
   }
 
-  @Override
-  public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
-      throws IOException {
+  /**
+   * Find the location of the region of <i>tableName</i> that <i>row</i> lives in, ignoring any
+   * value that might be in the cache.
+   * @param tableName name of the table <i>row</i> is in
+   * @param row row key you're trying to find the region of
+   * @return HRegionLocation that describes where to find the region in question
+   * @throws IOException if a remote or network exception occurs
+   */
+  HRegionLocation relocateRegion(final TableName tableName, final byte[] row) throws IOException {
     RegionLocations locations =
       relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
     return locations == null ? null
       : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
   }
 
-  @Override
-  public RegionLocations relocateRegion(final TableName tableName,
+  /**
+   * Find the location of the region of <i>tableName</i> that <i>row</i>
+   * lives in, ignoring any value that might be in the cache.
+   * @param tableName name of the table <i>row</i> is in
+   * @param row row key you're trying to find the region of
+   * @param replicaId the replicaId of the region
+   * @return RegionLocations that describe where to find the region in
+   *   question
+   * @throws IOException if a remote or network exception occurs
+   */
+  RegionLocations relocateRegion(final TableName tableName,
       final byte [] row, int replicaId) throws IOException{
     // Since this is an explicit request not to use any caching, finding
     // disabled tables should not be desirable.  This will ensure that an exception is thrown when
@@ -756,14 +789,30 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return locateRegion(tableName, row, false, true, replicaId);
   }
 
-  @Override
-  public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
+  /**
+   * @param tableName table to get regions of
+   * @param row the row
+   * @param useCache Should we use the cache to retrieve the region information.
+   * @param retry do we retry
+   * @return region locations for this row.
+   * @throws IOException if IO failure occurs
+   */
+  RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
       boolean retry) throws IOException {
     return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
   }
 
-  @Override
-  public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
+  /**
+  *
+  * @param tableName table to get regions of
+  * @param row the row
+  * @param useCache Should we use the cache to retrieve the region information.
+  * @param retry do we retry
+  * @param replicaId the replicaId for the region
+  * @return region locations for this row.
+  * @throws IOException if IO failure occurs
+  */
+  RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
       boolean retry, int replicaId) throws IOException {
     checkClosed();
     if (tableName == null || tableName.getName().length == 0) {
@@ -967,8 +1016,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    * @param tableName The table name.
    * @param location the new location
    */
-  @Override
-  public void cacheLocation(final TableName tableName, final RegionLocations location) {
+  void cacheLocation(final TableName tableName, final RegionLocations location) {
     metaCache.cacheLocation(tableName, location);
   }
 
@@ -982,15 +1030,15 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return metaCache.getCachedLocation(tableName, row);
   }
 
-  public void clearRegionCache(final TableName tableName, byte[] row) {
+  void clearRegionCache(final TableName tableName, byte[] row) {
     metaCache.clearCache(tableName, row);
   }
 
-  /*
-   * Delete all cached entries of a table that maps to a specific location.
+  /**
+   * Clear any caches that pertain to server name <code>sn</code>.
+   * @param sn A server name
    */
-  @Override
-  public void clearCaches(final ServerName serverName) {
+  void clearCaches(final ServerName serverName) {
     metaCache.clearCache(serverName);
   }
 
@@ -999,8 +1047,11 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     metaCache.clearCache();
   }
 
-  @Override
-  public void clearRegionCache(final TableName tableName) {
+  /**
+   * Allows flushing the region cache of all locations that pertain to <code>tableName</code>
+   * @param tableName Name of the table whose regions we are to remove from cache.
+   */
+  void clearRegionCache(final TableName tableName) {
     metaCache.clearCache(tableName);
   }
 
@@ -1845,8 +1896,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     cacheLocation(hri.getTable(), source, newHrl);
   }
 
-  @Override
-  public void deleteCachedRegionLocation(final HRegionLocation location) {
+  void deleteCachedRegionLocation(final HRegionLocation location) {
     metaCache.clearCache(location);
   }
 
@@ -1858,8 +1908,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    *   or wrapped or both RegionMovedException
    * @param source server that is the source of the location update.
    */
-  @Override
-  public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey,
+  void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey,
     final Object exception, final ServerName source) {
     if (rowkey == null || tableName == null) {
       LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) +
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java
index bb265a4..d881fe0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
@@ -28,6 +27,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
@@ -42,7 +42,7 @@ public class FlushRegionCallable extends RegionAdminServiceCallable<FlushRegionR
   private final boolean writeFlushWalMarker;
   private boolean reload;
 
-  public FlushRegionCallable(ClusterConnection connection,
+  public FlushRegionCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] regionName,
       byte[] regionStartKey, boolean writeFlushWalMarker) {
     super(connection, rpcControllerFactory, tableName, regionStartKey);
@@ -50,7 +50,7 @@ public class FlushRegionCallable extends RegionAdminServiceCallable<FlushRegionR
     this.writeFlushWalMarker = writeFlushWalMarker;
   }
 
-  public FlushRegionCallable(ClusterConnection connection,
+  public FlushRegionCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, RegionInfo regionInfo,
       boolean writeFlushWalMarker) {
     this(connection, rpcControllerFactory, regionInfo.getTable(), regionInfo.getRegionName(),
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 769ddd7..a447b0c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -45,7 +44,6 @@ import java.util.function.Supplier;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CacheEvictionStats;
@@ -248,7 +246,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
 public class HBaseAdmin implements Admin {
   private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
 
-  private ClusterConnection connection;
+  private ConnectionImplementation connection;
 
   private final Configuration conf;
   private final long pause;
@@ -269,7 +267,7 @@ public class HBaseAdmin implements Admin {
     return operationTimeout;
   }
 
-  HBaseAdmin(ClusterConnection connection) throws IOException {
+  HBaseAdmin(ConnectionImplementation connection) throws IOException {
     this.conf = connection.getConfiguration();
     this.connection = connection;
 
@@ -755,7 +753,9 @@ public class HBaseAdmin implements Admin {
     protected Void postOperationResult(final Void result, final long deadlineTs)
         throws IOException, TimeoutException {
       // Delete cached information to prevent clients from using old locations
-      ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName());
+      try (RegionLocator locator = getAdmin().getConnection().getRegionLocator(getTableName())) {
+        locator.clearRegionLocationCache();
+      }
       return super.postOperationResult(result, deadlineTs);
     }
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 15a189c..c77c216 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -105,7 +105,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies;
 public class HTable implements Table {
   private static final Logger LOG = LoggerFactory.getLogger(HTable.class);
   private static final Consistency DEFAULT_CONSISTENCY = Consistency.STRONG;
-  private final ClusterConnection connection;
+  private final ConnectionImplementation connection;
   private final TableName tableName;
   private final Configuration configuration;
   private final ConnectionConfiguration connConfiguration;
@@ -953,7 +953,7 @@ public class HTable implements Table {
    * This is a power user function: avoid unless you know the ramifications.
    */
   public void clearRegionCache() {
-    this.connection.clearRegionCache();
+    this.connection.clearRegionLocationCache();
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index ece91bd..33dfefa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -46,25 +46,25 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<
   protected final RpcControllerFactory rpcControllerFactory;
   private HBaseRpcController controller = null;
 
-  protected final ClusterConnection connection;
+  protected final ConnectionImplementation connection;
   protected HRegionLocation location;
   protected final TableName tableName;
   protected final byte[] row;
   protected final int replicaId;
 
-  public RegionAdminServiceCallable(ClusterConnection connection,
+  public RegionAdminServiceCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] row) {
     this(connection, rpcControllerFactory, null, tableName, row);
   }
 
-  public RegionAdminServiceCallable(ClusterConnection connection,
+  public RegionAdminServiceCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, HRegionLocation location,
       TableName tableName, byte[] row) {
     this(connection, rpcControllerFactory, location,
       tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
   }
 
-  public RegionAdminServiceCallable(ClusterConnection connection,
+  public RegionAdminServiceCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, HRegionLocation location,
       TableName tableName, byte[] row, int replicaId) {
     this.connection = connection;
@@ -138,8 +138,8 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<
     return ConnectionUtils.getPauseTime(pause, tries);
   }
 
-  public static RegionLocations getRegionLocations(
-      ClusterConnection connection, TableName tableName, byte[] row,
+  private static RegionLocations getRegionLocations(
+      ConnectionImplementation connection, TableName tableName, byte[] row,
       boolean useCache, int replicaId)
       throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
     RegionLocations rl;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index f709c44..264304e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 // Public but should be package private only it is used by MetaTableAccessor. FIX!!
 @InterfaceAudience.Private
 public abstract class RegionServerCallable<T, S> implements RetryingCallable<T> {
-  private final Connection connection;
+  private final ConnectionImplementation connection;
   private final TableName tableName;
   private final byte[] row;
   /**
@@ -75,12 +75,12 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
    * @param tableName Table name to which <code>row</code> belongs.
    * @param row The row we want in <code>tableName</code>.
    */
-  public RegionServerCallable(Connection connection, TableName tableName, byte [] row,
+  public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row,
       RpcController rpcController) {
     this(connection, tableName, row, rpcController, HConstants.NORMAL_QOS);
   }
 
-  public RegionServerCallable(Connection connection, TableName tableName, byte [] row,
+  public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row,
       RpcController rpcController, int priority) {
     super();
     this.connection = connection;
@@ -162,8 +162,8 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
   /**
    * @return {@link ClusterConnection} instance used by this Callable.
    */
-  protected ClusterConnection getConnection() {
-    return (ClusterConnection) this.connection;
+  protected ConnectionImplementation getConnection() {
+    return this.connection;
   }
 
   protected HRegionLocation getLocation() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
index 53b9641..34c24c0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
@@ -46,11 +46,11 @@ public class ReversedClientScanner extends ClientScanner {
    * @throws IOException
    */
   public ReversedClientScanner(Configuration conf, Scan scan, TableName tableName,
-      ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory,
       RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
       throws IOException {
     super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool,
-        primaryOperationTimeout);
+      primaryOperationTimeout);
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 4a31cff..b2d0600 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hbase.client;
 
 
+import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.Collections;
@@ -27,24 +29,22 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET;
 
 /**
  * Caller that goes to replica if the primary region does no answer within a configurable
@@ -58,7 +58,7 @@ public class RpcRetryingCallerWithReadReplicas {
       LoggerFactory.getLogger(RpcRetryingCallerWithReadReplicas.class);
 
   protected final ExecutorService pool;
-  protected final ClusterConnection cConnection;
+  protected final ConnectionImplementation cConnection;
   protected final Configuration conf;
   protected final Get get;
   protected final TableName tableName;
@@ -71,7 +71,7 @@ public class RpcRetryingCallerWithReadReplicas {
 
   public RpcRetryingCallerWithReadReplicas(
       RpcControllerFactory rpcControllerFactory, TableName tableName,
-      ClusterConnection cConnection, final Get get,
+      ConnectionImplementation cConnection, final Get get,
       ExecutorService pool, int retries, int operationTimeout, int rpcTimeout,
       int timeBeforeReplicas) {
     this.rpcControllerFactory = rpcControllerFactory;
@@ -185,19 +185,14 @@ public class RpcRetryingCallerWithReadReplicas {
       } else {
         // We cannot get the primary replica location, it is possible that the region
         // server hosting meta is down, it needs to proceed to try cached replicas.
-        if (cConnection instanceof ConnectionImplementation) {
-          rl = ((ConnectionImplementation)cConnection).getCachedLocation(tableName, get.getRow());
-          if (rl == null) {
-            // No cached locations
-            throw e;
-          }
-
-          // Primary replica location is not known, skip primary replica
-          skipPrimary = true;
-        } else {
-          // For completeness
+        rl = cConnection.getCachedLocation(tableName, get.getRow());
+        if (rl == null) {
+          // No cached locations
           throw e;
         }
+
+        // Primary replica location is not known, skip primary replica
+        skipPrimary = true;
       }
     }
 
@@ -316,9 +311,8 @@ public class RpcRetryingCallerWithReadReplicas {
   }
 
   static RegionLocations getRegionLocations(boolean useCache, int replicaId,
-                 ClusterConnection cConnection, TableName tableName, byte[] row)
+      ConnectionImplementation cConnection, TableName tableName, byte[] row)
       throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
-
     RegionLocations rl;
     try {
       if (useCache) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
index bcb81f7..27e5f87 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hbase.client;
 
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.HashSet;
@@ -31,17 +29,18 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
-import org.apache.hadoop.hbase.util.Pair;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
 /**
  * This class has the logic for handling scanners for regions with and without replicas.
@@ -60,7 +59,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
   private static final Logger LOG = LoggerFactory.getLogger(ScannerCallableWithReplicas.class);
   volatile ScannerCallable currentScannerCallable;
   AtomicBoolean replicaSwitched = new AtomicBoolean(false);
-  final ClusterConnection cConnection;
+  final ConnectionImplementation cConnection;
   protected final ExecutorService pool;
   protected final int timeBeforeReplicas;
   private final Scan scan;
@@ -74,7 +73,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
   private boolean someRPCcancelled = false; //required for testing purposes only
   private int regionReplication = 0;
 
-  public ScannerCallableWithReplicas(TableName tableName, ClusterConnection cConnection,
+  public ScannerCallableWithReplicas(TableName tableName, ConnectionImplementation cConnection,
       ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan,
       int retries, int scannerTimeout, int caching, Configuration conf,
       RpcRetryingCaller<Result []> caller) {
@@ -151,19 +150,13 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
       RegionLocations rl = null;
       try {
         rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(true,
-            RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName,
-            currentScannerCallable.getRow());
+          RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName,
+          currentScannerCallable.getRow());
       } catch (RetriesExhaustedException | DoNotRetryIOException e) {
         // We cannot get the primary replica region location, it is possible that the region server
         // hosting meta table is down, it needs to proceed to try cached replicas directly.
-        if (cConnection instanceof ConnectionImplementation) {
-          rl = ((ConnectionImplementation) cConnection)
-              .getCachedLocation(tableName, currentScannerCallable.getRow());
-          if (rl == null) {
-            throw e;
-          }
-        } else {
-          // For completeness
+        rl = cConnection.getCachedLocation(tableName, currentScannerCallable.getRow());
+        if (rl == null) {
           throw e;
         }
       }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 71b21ac..70d10ca 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -175,17 +175,17 @@ public class TestAsyncProcess {
       return r;
     }
 
-    public MyAsyncProcess(ClusterConnection hc, Configuration conf) {
-      super(hc, conf,
-          new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
+    public MyAsyncProcess(ConnectionImplementation hc, Configuration conf) {
+      super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
       service = Executors.newFixedThreadPool(5);
       this.conf = conf;
     }
 
-    public MyAsyncProcess(ClusterConnection hc, Configuration conf, AtomicInteger nbThreads) {
+    public MyAsyncProcess(ConnectionImplementation hc, Configuration conf,
+        AtomicInteger nbThreads) {
       super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
-      service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
-          new SynchronousQueue<>(), new CountingThreadFactory(nbThreads));
+      service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, new SynchronousQueue<>(),
+        new CountingThreadFactory(nbThreads));
     }
 
     public <CResult> AsyncRequestFuture submit(ExecutorService pool, TableName tableName,
@@ -326,7 +326,8 @@ public class TestAsyncProcess {
 
     private final IOException ioe;
 
-    public AsyncProcessWithFailure(ClusterConnection hc, Configuration conf, IOException ioe) {
+    public AsyncProcessWithFailure(ConnectionImplementation hc, Configuration conf,
+        IOException ioe) {
       super(hc, conf);
       this.ioe = ioe;
       serverTrackerTimeout = 1L;
@@ -376,7 +377,7 @@ public class TestAsyncProcess {
       customPrimarySleepMs.put(server, primaryMs);
     }
 
-    public MyAsyncProcessWithReplicas(ClusterConnection hc, Configuration conf) {
+    public MyAsyncProcessWithReplicas(ConnectionImplementation hc, Configuration conf) {
       super(hc, conf);
     }
 
@@ -622,7 +623,7 @@ public class TestAsyncProcess {
   }
 
   private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
     final long defaultHeapSizePerRequest = conn.getConfiguration().getLong(
@@ -718,7 +719,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmit() throws Exception {
-    ClusterConnection hc = createHConnection();
+    ConnectionImplementation hc = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);
 
     List<Put> puts = new ArrayList<>(1);
@@ -730,7 +731,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitWithCB() throws Exception {
-    ClusterConnection hc = createHConnection();
+    ConnectionImplementation hc = createConnectionImpl();
     final AtomicInteger updateCalled = new AtomicInteger(0);
     Batch.Callback<Object> cb = new Batch.Callback<Object>() {
       @Override
@@ -751,7 +752,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitBusyRegion() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
     conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
@@ -779,7 +780,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitBusyRegionServer() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
@@ -810,7 +811,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFail() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF);
+    MyAsyncProcess ap = new MyAsyncProcess(createConnectionImpl(), CONF);
 
     List<Put> puts = new ArrayList<>(1);
     Put p = createPut(1, false);
@@ -836,7 +837,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitTrue() throws IOException {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
@@ -885,7 +886,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFailAndSuccess() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF);
+    MyAsyncProcess ap = new MyAsyncProcess(createConnectionImpl(), CONF);
 
     List<Put> puts = new ArrayList<>(3);
     puts.add(createPut(1, false));
@@ -912,7 +913,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFlush() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF);
+    MyAsyncProcess ap = new MyAsyncProcess(createConnectionImpl(), CONF);
 
     List<Put> puts = new ArrayList<>(3);
     puts.add(createPut(1, false));
@@ -929,7 +930,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testTaskCountWithoutClientBackoffPolicy() throws IOException, InterruptedException {
-    ClusterConnection hc = createHConnection();
+    ConnectionImplementation hc = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);
     testTaskCount(ap);
   }
@@ -939,7 +940,7 @@ public class TestAsyncProcess {
     Configuration copyConf = new Configuration(CONF);
     copyConf.setBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, true);
     MyClientBackoffPolicy bp = new MyClientBackoffPolicy();
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     Mockito.when(conn.getConfiguration()).thenReturn(copyConf);
     Mockito.when(conn.getStatisticsTracker()).thenReturn(ServerStatisticTracker.create(copyConf));
     Mockito.when(conn.getBackoffPolicy()).thenReturn(bp);
@@ -979,7 +980,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testMaxTask() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
     conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
@@ -1038,8 +1039,8 @@ public class TestAsyncProcess {
     }
   }
 
-  private ClusterConnection createHConnection() throws IOException {
-    ClusterConnection hc = createHConnectionCommon();
+  private ConnectionImplementation createConnectionImpl() throws IOException {
+    ConnectionImplementation hc = createConnectionImplCommon();
     setMockLocation(hc, DUMMY_BYTES_1, new RegionLocations(loc1));
     setMockLocation(hc, DUMMY_BYTES_2, new RegionLocations(loc2));
     setMockLocation(hc, DUMMY_BYTES_3, new RegionLocations(loc3));
@@ -1049,8 +1050,8 @@ public class TestAsyncProcess {
     return hc;
   }
 
-  private ClusterConnection createHConnectionWithReplicas() throws IOException {
-    ClusterConnection hc = createHConnectionCommon();
+  private ConnectionImplementation createConnectionImplWithReplicas() throws IOException {
+    ConnectionImplementation hc = createConnectionImplCommon();
     setMockLocation(hc, DUMMY_BYTES_1, hrls1);
     setMockLocation(hc, DUMMY_BYTES_2, hrls2);
     setMockLocation(hc, DUMMY_BYTES_3, hrls3);
@@ -1069,16 +1070,16 @@ public class TestAsyncProcess {
     return hc;
   }
 
-  private static void setMockLocation(ClusterConnection hc, byte[] row,
+  private static void setMockLocation(ConnectionImplementation hc, byte[] row,
       RegionLocations result) throws IOException {
-    Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row),
-        Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result);
-    Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row),
-        Mockito.anyBoolean(), Mockito.anyBoolean())).thenReturn(result);
+    Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(),
+      Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result);
+    Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(),
+      Mockito.anyBoolean())).thenReturn(result);
   }
 
-  private ClusterConnection createHConnectionCommon() {
-    ClusterConnection hc = Mockito.mock(ClusterConnection.class);
+  private ConnectionImplementation createConnectionImplCommon() {
+    ConnectionImplementation hc = Mockito.mock(ConnectionImplementation.class);
     NonceGenerator ng = Mockito.mock(NonceGenerator.class);
     Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE);
     Mockito.when(hc.getNonceGenerator()).thenReturn(ng);
@@ -1089,7 +1090,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testHTablePutSuccess() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     BufferedMutatorImpl ht = new BufferedMutatorImpl(conn, bufferParam, ap);
@@ -1106,7 +1107,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSettingWriteBufferPeriodicFlushParameters() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
 
     checkPeriodicFlushParameters(conn, ap,
@@ -1152,7 +1153,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testWriteBufferPeriodicFlushTimeoutMs() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
 
@@ -1219,7 +1220,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testBufferedMutatorImplWithSharedPool() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     BufferedMutator ht = new BufferedMutatorImpl(conn, bufferParam, ap);
@@ -1230,7 +1231,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFailedPutAndNewPut() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE)
             .writeBufferSize(0);
@@ -1275,7 +1276,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testBatch() throws IOException, InterruptedException {
-    ClusterConnection conn = new MyConnectionImpl(CONF);
+    ConnectionImplementation conn = new MyConnectionImpl(CONF);
     HTable ht = (HTable) conn.getTable(DUMMY_TABLE);
     ht.multiAp = new MyAsyncProcess(conn, CONF);
 
@@ -1306,7 +1307,7 @@ public class TestAsyncProcess {
   @Test
   public void testErrorsServers() throws IOException {
     Configuration configuration = new Configuration(CONF);
-    ClusterConnection conn = new MyConnectionImpl(configuration);
+    ConnectionImplementation conn = new MyConnectionImpl(configuration);
     MyAsyncProcess ap = new MyAsyncProcess(conn, configuration);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
@@ -1337,7 +1338,7 @@ public class TestAsyncProcess {
     Configuration copyConf = new Configuration(CONF);
     copyConf.setLong(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, readTimeout);
     copyConf.setLong(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, writeTimeout);
-    ClusterConnection conn = new MyConnectionImpl(copyConf);
+    ConnectionImplementation conn = new MyConnectionImpl(copyConf);
     MyAsyncProcess ap = new MyAsyncProcess(conn, copyConf);
     try (HTable ht = (HTable) conn.getTable(DUMMY_TABLE)) {
       ht.multiAp = ap;
@@ -1370,7 +1371,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testErrors() throws IOException {
-    ClusterConnection conn = new MyConnectionImpl(CONF);
+    ConnectionImplementation conn = new MyConnectionImpl(CONF);
     AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, CONF, new IOException("test"));
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
@@ -1394,7 +1395,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testCallQueueTooLarge() throws IOException {
-    ClusterConnection conn = new MyConnectionImpl(CONF);
+    ConnectionImplementation conn = new MyConnectionImpl(CONF);
     AsyncProcessWithFailure ap =
         new AsyncProcessWithFailure(conn, CONF, new CallQueueTooBigException());
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
@@ -1609,7 +1610,7 @@ public class TestAsyncProcess {
     // TODO: this is kind of timing dependent... perhaps it should detect from createCaller
     //       that the replica call has happened and that way control the ordering.
     Configuration conf = new Configuration();
-    ClusterConnection conn = createHConnectionWithReplicas();
+    ConnectionImplementation conn = createConnectionImplWithReplicas();
     conf.setInt(AsyncProcess.PRIMARY_CALL_TIMEOUT_KEY, replicaAfterMs * 1000);
     if (retries >= 0) {
       conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
@@ -1707,16 +1708,15 @@ public class TestAsyncProcess {
   }
 
   static class AsyncProcessForThrowableCheck extends AsyncProcess {
-    public AsyncProcessForThrowableCheck(ClusterConnection hc, Configuration conf) {
-      super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(
-          conf));
+    public AsyncProcessForThrowableCheck(ConnectionImplementation hc, Configuration conf) {
+      super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
     }
   }
 
   @Test
   public void testUncheckedException() throws Exception {
     // Test the case pool.submit throws unchecked exception
-    ClusterConnection hc = createHConnection();
+    ConnectionImplementation hc = createConnectionImpl();
     MyThreadPoolExecutor myPool =
         new MyThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
             new LinkedBlockingQueue<>(200));
@@ -1748,7 +1748,7 @@ public class TestAsyncProcess {
     final int retries = 1;
     myConf.setLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, specialPause);
     myConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-    ClusterConnection conn = new MyConnectionImpl(myConf);
+    ConnectionImplementation conn = new MyConnectionImpl(myConf);
     AsyncProcessWithFailure ap =
         new AsyncProcessWithFailure(conn, myConf, new CallQueueTooBigException());
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
@@ -1807,7 +1807,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testRetryWithExceptionClearsMetaCache() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     Configuration myConf = conn.getConfiguration();
     myConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);
 
@@ -1840,7 +1840,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testQueueRowAccess() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, null, null,
       new BufferedMutatorParams(DUMMY_TABLE).writeBufferSize(100000));
     Put p0 = new Put(DUMMY_BYTES_1).addColumn(DUMMY_BYTES_1, DUMMY_BYTES_1, DUMMY_BYTES_1);
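
The test changes above mock the concrete ConnectionImplementation class rather
than the removed ClusterConnection interface. A minimal sketch of the stubbing
pattern the tests rely on, where conf, table, row and loc are hypothetical
test fixtures:

    ConnectionImplementation conn = Mockito.mock(ConnectionImplementation.class);
    Mockito.when(conn.getConfiguration()).thenReturn(conf);
    // stub the package-private locateRegion overload that callers hit
    Mockito.when(conn.locateRegion(Mockito.eq(table), Mockito.eq(row),
        Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt()))
      .thenReturn(new RegionLocations(loc));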
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java
index ffc4e51..2c24aaa 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java
@@ -175,8 +175,8 @@ public class TestAsyncProcessWithRegionException {
     }
   }
 
-  private static ClusterConnection createHConnection() throws IOException {
-    ClusterConnection hc = Mockito.mock(ClusterConnection.class);
+  private static ConnectionImplementation createHConnection() throws IOException {
+    ConnectionImplementation hc = Mockito.mock(ConnectionImplementation.class);
     NonceGenerator ng = Mockito.mock(NonceGenerator.class);
     Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE);
     Mockito.when(hc.getNonceGenerator()).thenReturn(ng);
@@ -190,8 +190,8 @@ public class TestAsyncProcessWithRegionException {
     return hc;
   }
 
-  private static void setMockLocation(ClusterConnection hc, byte[] row, RegionLocations result)
-    throws IOException {
+  private static void setMockLocation(ConnectionImplementation hc, byte[] row,
+      RegionLocations result) throws IOException {
     Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(),
       Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result);
     Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(),
@@ -201,7 +201,7 @@ public class TestAsyncProcessWithRegionException {
   private static class MyAsyncProcess extends AsyncProcess {
     private final ExecutorService service = Executors.newFixedThreadPool(5);
 
-    MyAsyncProcess(ClusterConnection hc, Configuration conf) {
+    MyAsyncProcess(ConnectionImplementation hc, Configuration conf) {
       super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
     }
 
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
index f8e1295..f0375e2 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
@@ -48,7 +48,7 @@ public class TestBufferedMutator {
    * Just to prove that I can insert a BM other than default.
    */
   public static class MyBufferedMutator extends BufferedMutatorImpl {
-    MyBufferedMutator(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory,
+    MyBufferedMutator(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory,
         RpcControllerFactory rpcFactory, BufferedMutatorParams params) {
       super(conn, rpcCallerFactory, rpcFactory, params);
     }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
index 48ca751..9f1f6f3 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
@@ -71,7 +71,7 @@ public class TestClientScanner {
   ExecutorService pool;
   Configuration conf;
 
-  ClusterConnection clusterConn;
+  ConnectionImplementation clusterConn;
   RpcRetryingCallerFactory rpcFactory;
   RpcControllerFactory controllerFactory;
 
@@ -80,7 +80,7 @@ public class TestClientScanner {
 
   @Before
   public void setup() throws IOException {
-    clusterConn = Mockito.mock(ClusterConnection.class);
+    clusterConn = Mockito.mock(ConnectionImplementation.class);
     rpcFactory = Mockito.mock(RpcRetryingCallerFactory.class);
     controllerFactory = Mockito.mock(RpcControllerFactory.class);
     pool = Executors.newSingleThreadExecutor();
@@ -103,11 +103,11 @@ public class TestClientScanner {
     private boolean initialized = false;
 
     public MockClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
-        ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
+        ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory,
         RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
         throws IOException {
       super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool,
-          primaryOperationTimeout);
+        primaryOperationTimeout);
     }
 
     @Override
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
index 1b554f7..7eb2b94 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
@@ -46,7 +45,7 @@ public class TestReversedScannerCallable {
       HBaseClassTestRule.forClass(TestReversedScannerCallable.class);
 
   @Mock
-  private ClusterConnection connection;
+  private ConnectionImplementation connection;
   @Mock
   private Scan scan;
   @Mock
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
index b5e99d2..b9cb167 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
@@ -31,13 +31,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.IntegrationTestIngest;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
@@ -52,6 +51,7 @@ import org.junit.Assert;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
@@ -350,10 +350,11 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
           numReadFailures.addAndGet(1); // fail the test
           for (Result r : results) {
             LOG.error("FAILED FOR " + r);
-            RegionLocations rl = ((ClusterConnection)connection).
-                locateRegion(tableName, r.getRow(), true, true);
-            HRegionLocation locations[] = rl.getRegionLocations();
-            for (HRegionLocation h : locations) {
+            List<HRegionLocation> locs;
+            try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+              locs = locator.getRegionLocations(r.getRow());
+            }
+            for (HRegionLocation h : locs) {
               LOG.error("LOCATION " + h);
             }
           }
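
A minimal sketch of the replica-aware lookup the hunk above switches to,
assuming an open Connection named connection and an slf4j Logger named LOG;
the table name and row are hypothetical:

    try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf("t1"))) {
      // getRegionLocations(row) returns one HRegionLocation per region replica
      for (HRegionLocation h : locator.getRegionLocations(Bytes.toBytes("row-0"))) {
        LOG.info("LOCATION " + h);
      }
    }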
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 9e5f9e8..34c846d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -99,6 +99,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -1948,35 +1949,34 @@ public class HBaseFsck extends Configured implements Closeable {
    * Record the location of the hbase:meta region as found in ZooKeeper.
    */
   private boolean recordMetaRegion() throws IOException {
-    RegionLocations rl = connection.locateRegion(TableName.META_TABLE_NAME,
-        HConstants.EMPTY_START_ROW, false, false);
-    if (rl == null) {
-      errors.reportError(ERROR_CODE.NULL_META_REGION,
-          "META region was not found in ZooKeeper");
+    List<HRegionLocation> locs;
+    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
+      locs = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true);
+    }
+    if (locs == null || locs.isEmpty()) {
+      errors.reportError(ERROR_CODE.NULL_META_REGION, "META region was not found in ZooKeeper");
       return false;
     }
-    for (HRegionLocation metaLocation : rl.getRegionLocations()) {
+    for (HRegionLocation metaLocation : locs) {
       // Check if Meta region is valid and existing
-      if (metaLocation == null ) {
-        errors.reportError(ERROR_CODE.NULL_META_REGION,
-            "META region location is null");
+      if (metaLocation == null) {
+        errors.reportError(ERROR_CODE.NULL_META_REGION, "META region location is null");
         return false;
       }
-      if (metaLocation.getRegionInfo() == null) {
-        errors.reportError(ERROR_CODE.NULL_META_REGION,
-            "META location regionInfo is null");
+      if (metaLocation.getRegion() == null) {
+        errors.reportError(ERROR_CODE.NULL_META_REGION, "META location regionInfo is null");
         return false;
       }
       if (metaLocation.getHostname() == null) {
-        errors.reportError(ERROR_CODE.NULL_META_REGION,
-            "META location hostName is null");
+        errors.reportError(ERROR_CODE.NULL_META_REGION, "META location hostName is null");
         return false;
       }
       ServerName sn = metaLocation.getServerName();
-      MetaEntry m = new MetaEntry(metaLocation.getRegionInfo(), sn, EnvironmentEdgeManager.currentTime());
-      HbckInfo hbckInfo = regionInfoMap.get(metaLocation.getRegionInfo().getEncodedName());
+      MetaEntry m =
+        new MetaEntry(metaLocation.getRegion(), sn, EnvironmentEdgeManager.currentTime());
+      HbckInfo hbckInfo = regionInfoMap.get(metaLocation.getRegion().getEncodedName());
       if (hbckInfo == null) {
-        regionInfoMap.put(metaLocation.getRegionInfo().getEncodedName(), new HbckInfo(m));
+        regionInfoMap.put(metaLocation.getRegion().getEncodedName(), new HbckInfo(m));
       } else {
         hbckInfo.metaEntry = m;
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 1b58634..a779d36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -44,22 +44,21 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
@@ -707,7 +706,7 @@ public class RegionSplitter {
     Path tableDir = tableDirAndSplitFile.getFirst();
     FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
     // Clear the cache to forcibly refresh region information
-    ((ClusterConnection)connection).clearRegionCache();
+    connection.clearRegionLocationCache();
     TableDescriptor htd = null;
     try (Table table = connection.getTable(tableName)) {
       htd = table.getDescriptor();
@@ -768,7 +767,7 @@ public class RegionSplitter {
         } catch (NoServerForRegionException nsfre) {
           LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
           physicalSplitting.add(region);
-          ((ClusterConnection)connection).clearRegionCache();
+          connection.clearRegionLocationCache();
         }
       }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index a8beab6..2a5a395 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -20,44 +20,43 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.util.Threads;
 import org.mockito.Mockito;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+
 /**
- * {@link ClusterConnection} testing utility.
+ * {@link ConnectionImplementation} testing utility.
  */
 public class HConnectionTestingUtility {
   /*
-   * Not part of {@link HBaseTestingUtility} because this class is not
-   * in same package as {@link ClusterConnection}.  Would have to reveal ugly
-   * {@link ConnectionImplementation} innards to HBaseTestingUtility to give it access.
+   * Not part of {@link HBaseTestingUtility} because this class is not in same package as {@link
+   * ConnectionImplementation}. Would have to reveal ugly {@link ConnectionImplementation} innards
+   * to HBaseTestingUtility to give it access.
    */
   /**
-   * Get a Mocked {@link ClusterConnection} that goes with the passed <code>conf</code>
-   * configuration instance.  Minimally the mock will return
-   * &lt;code>conf&lt;/conf> when {@link ClusterConnection#getConfiguration()} is invoked.
-   * Be sure to shutdown the connection when done by calling
-   * {@link Connection#close()} else it will stick around; this is probably not what you want.
+   * Get a Mocked {@link ConnectionImplementation} that goes with the passed <code>conf</code>
+   * configuration instance. Minimally the mock will return <code>conf</code> when
+   * {@link ConnectionImplementation#getConfiguration()} is invoked. Be sure to shut down the
+   * connection when done by calling {@link Connection#close()} else it will stick around; this is
+   * probably not what you want.
    * @param conf configuration
-   * @return ClusterConnection object for <code>conf</code>
+   * @return ConnectionImplementation object for <code>conf</code>
    * @throws ZooKeeperConnectionException
    */
-  public static ClusterConnection getMockedConnection(final Configuration conf)
+  public static ConnectionImplementation getMockedConnection(final Configuration conf)
   throws ZooKeeperConnectionException {
     ConnectionImplementation connection = Mockito.mock(ConnectionImplementation.class);
     Mockito.when(connection.getConfiguration()).thenReturn(conf);
@@ -70,37 +69,30 @@ public class HConnectionTestingUtility {
   }
 
   /**
-   * Calls {@link #getMockedConnection(Configuration)} and then mocks a few
-   * more of the popular {@link ClusterConnection} methods so they do 'normal'
-   * operation (see return doc below for list). Be sure to shutdown the
-   * connection when done by calling {@link Connection#close()} else it will stick around;
-   * this is probably not what you want.
-   *
+   * Calls {@link #getMockedConnection(Configuration)} and then mocks a few more of the popular
+   * {@link ConnectionImplementation} methods so they do 'normal' operation (see return doc below
+   * for list). Be sure to shut down the connection when done by calling {@link Connection#close()}
+   * else it will stick around; this is probably not what you want.
    * @param conf Configuration to use
-   * @param admin An AdminProtocol; can be null but is usually
-   * itself a mock.
-   * @param client A ClientProtocol; can be null but is usually
-   * itself a mock.
-   * @param sn ServerName to include in the region location returned by this
-   * <code>connection</code>
-   * @param hri RegionInfo to include in the location returned when
-   * getRegionLocator is called on the mocked connection
+   * @param admin An AdminProtocol; can be null but is usually itself a mock.
+   * @param client A ClientProtocol; can be null but is usually itself a mock.
+   * @param sn ServerName to include in the region location returned by this <code>connection</code>
+   * @param hri RegionInfo to include in the location returned when getRegionLocator is called on
+   *          the mocked connection
    * @return Mock up a connection that returns a {@link Configuration} when
-   * {@link ClusterConnection#getConfiguration()} is called, a 'location' when
-   * {@link ClusterConnection#getRegionLocation(org.apache.hadoop.hbase.TableName, byte[], boolean)}
-   * is called,
-   * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
-   * {@link ClusterConnection#getAdmin(ServerName)} is called, returns the passed
-   * {@link ClientProtos.ClientService.BlockingInterface} instance when
-   * {@link ClusterConnection#getClient(ServerName)} is called (Be sure to call
-   * {@link Connection#close()} when done with this mocked Connection.
-   * @throws IOException
+   *         {@link ConnectionImplementation#getConfiguration()} is called, a 'location' when
+   *         {@link ConnectionImplementation#getRegionLocation(TableName, byte[], boolean)}
+   *         is called, and that returns the passed
+   *         {@link AdminProtos.AdminService.BlockingInterface} instance when
+   *         {@link ConnectionImplementation#getAdmin(ServerName)} is called, and returns the
+   *         passed {@link ClientProtos.ClientService.BlockingInterface} instance when
+   *         {@link ConnectionImplementation#getClient(ServerName)} is called. (Be sure to call
+   *         {@link Connection#close()} when done with this mocked Connection.)
    */
-  public static ClusterConnection getMockedConnectionAndDecorate(final Configuration conf,
+  public static ConnectionImplementation getMockedConnectionAndDecorate(final Configuration conf,
       final AdminProtos.AdminService.BlockingInterface admin,
-      final ClientProtos.ClientService.BlockingInterface client,
-      final ServerName sn, final RegionInfo hri)
-  throws IOException {
+      final ClientProtos.ClientService.BlockingInterface client, final ServerName sn,
+      final RegionInfo hri) throws IOException {
     ConnectionImplementation c = Mockito.mock(ConnectionImplementation.class);
     Mockito.when(c.getConfiguration()).thenReturn(conf);
     Mockito.doNothing().when(c).close();
@@ -141,18 +133,17 @@ public class HConnectionTestingUtility {
   }
 
   /**
-   * Get a Mockito spied-upon {@link ClusterConnection} that goes with the passed
-   * <code>conf</code> configuration instance.
-   * Be sure to shutdown the connection when done by calling
-   * {@link Connection#close()} else it will stick around; this is probably not what you want.
+   * Get a Mockito spied-upon {@link ConnectionImplementation} that goes with the passed
+   * <code>conf</code> configuration instance. Be sure to shut down the connection when done by
+   * calling {@link Connection#close()} else it will stick around; this is probably not what you
+   * want.
    * @param conf configuration
-   * @return ClusterConnection object for <code>conf</code>
-   * @throws ZooKeeperConnectionException
-   * [Dead link]: See also
-   * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
+   * @return ConnectionImplementation object for <code>conf</code>
+   * @throws IOException [Dead link]: See also
+   *           {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
    */
-  public static ClusterConnection getSpiedConnection(final Configuration conf)
-  throws IOException {
+  public static ConnectionImplementation getSpiedConnection(final Configuration conf)
+      throws IOException {
     ConnectionImplementation connection =
       Mockito.spy(new ConnectionImplementation(conf, null, null));
     return connection;
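
For readers tracking this API change, a minimal sketch of how a test might use the
reworked utility (assuming only the methods shown in the diff above; configuration
setup and checked-exception handling are illustrative):

  Configuration conf = HBaseConfiguration.create();
  // The mock minimally answers getConfiguration() with the conf passed in.
  ConnectionImplementation conn = HConnectionTestingUtility.getMockedConnection(conf);
  try {
    assert conn.getConfiguration() == conf;
  } finally {
    // Close when done, else the mocked connection sticks around.
    conn.close();
  }
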
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
index 4e5665d..cd27a30 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
@@ -111,8 +111,8 @@ public class TestCISleep extends AbstractTestCITimeout {
     }
 
     RegionAdminServiceCallable<Object> regionAdminServiceCallable =
-      new RegionAdminServiceCallable<Object>((ClusterConnection) TEST_UTIL.getConnection(),
-          new RpcControllerFactory(TEST_UTIL.getConfiguration()), tableName, FAM_NAM) {
+      new RegionAdminServiceCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
+        new RpcControllerFactory(TEST_UTIL.getConfiguration()), tableName, FAM_NAM) {
         @Override
         public Object call(HBaseRpcController controller) throws Exception {
           return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
index a1026a9..ad73592 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
@@ -95,8 +95,9 @@ public class TestHBaseAdminNoCluster {
     configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, count);
     // Get mocked connection.   Getting the connection will register it so when HBaseAdmin is
     // constructed with same configuration, it will find this mocked connection.
-    ClusterConnection connection = HConnectionTestingUtility.getMockedConnection(configuration);
-    // Mock so we get back the master interface.  Make it so when createTable is called, we throw
+    ConnectionImplementation connection =
+      HConnectionTestingUtility.getMockedConnection(configuration);
+    // Mock so we get back the master interface. Make it so when createTable is called, we throw
     // the PleaseHoldException.
     MasterKeepAliveConnection masterAdmin = Mockito.mock(MasterKeepAliveConnection.class);
     Mockito.when(masterAdmin.createTable((RpcController)Mockito.any(),
@@ -292,7 +293,7 @@ public class TestHBaseAdminNoCluster {
     final int count = 10;
     configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, count);
 
-    ClusterConnection connection = mock(ClusterConnection.class);
+    ConnectionImplementation connection = mock(ConnectionImplementation.class);
     when(connection.getConfiguration()).thenReturn(configuration);
     MasterKeepAliveConnection masterAdmin =
         Mockito.mock(MasterKeepAliveConnection.class, new Answer() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
similarity index 86%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
index 5d36ea9..53f5064 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase;
+package org.apache.hadoop.hbase.client;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -26,11 +26,19 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableMap;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -112,8 +120,8 @@ public class TestMetaTableAccessorNoCluster {
     assertTrue(hri == null);
     // OK, give it what it expects
     kvs.clear();
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
-      HConstants.REGIONINFO_QUALIFIER, RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER,
+      RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
     hri = MetaTableAccessor.getRegionInfo(Result.create(kvs));
     assertNotNull(hri);
     assertTrue(RegionInfo.COMPARATOR.compare(hri, RegionInfoBuilder.FIRST_META_REGIONINFO) == 0);
@@ -123,8 +131,6 @@ public class TestMetaTableAccessorNoCluster {
    * Test that MetaTableAccessor will ride over server throwing
    * "Server not running" IOEs.
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a>
-   * @throws IOException
-   * @throws InterruptedException
    */
   @Test
   public void testRideOverServerNotRunning()
@@ -135,7 +141,7 @@ public class TestMetaTableAccessorNoCluster {
     // This is a servername we use in a few places below.
     ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
 
-    ClusterConnection connection = null;
+    ConnectionImplementation connection = null;
     try {
      // Mock a ClientProtocol. Our mock implementation will fail a few
       // times when we go to open a scanner.
@@ -190,26 +196,27 @@ public class TestMetaTableAccessorNoCluster {
       // Return the RegionLocations object when locateRegion
       // The ugly format below comes of 'Important gotcha on spying real objects!' from
       // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html
-      Mockito.doReturn(rl).when
-      (connection).locateRegion((TableName)Mockito.any(), (byte[])Mockito.any(),
-              Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt());
+      Mockito.doReturn(rl).when(connection).locateRegion((TableName) Mockito.any(),
+        (byte[]) Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt());
 
       // Now shove our HRI implementation into the spied-upon connection.
-      Mockito.doReturn(implementation).
-        when(connection).getClient(Mockito.any());
+      Mockito.doReturn(implementation).when(connection).getClient(Mockito.any());
 
       // Scan meta for user tables and verify we got back expected answer.
       NavigableMap<RegionInfo, Result> hris =
         MetaTableAccessor.getServerUserRegions(connection, sn);
       assertEquals(1, hris.size());
-      assertTrue(RegionInfo.COMPARATOR.compare(hris.firstEntry().getKey(), RegionInfoBuilder.FIRST_META_REGIONINFO) == 0);
+      assertTrue(RegionInfo.COMPARATOR.compare(hris.firstEntry().getKey(),
+        RegionInfoBuilder.FIRST_META_REGIONINFO) == 0);
       assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
       // Finally verify that scan was called four times -- three times
       // with exception and then on 4th attempt we succeed
-      Mockito.verify(implementation, Mockito.times(4)).
-        scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any());
+      Mockito.verify(implementation, Mockito.times(4)).scan((RpcController) Mockito.any(),
+        (ScanRequest) Mockito.any());
     } finally {
-      if (connection != null && !connection.isClosed()) connection.close();
+      if (connection != null && !connection.isClosed()) {
+        connection.close();
+      }
       zkw.close();
     }
   }
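
The 'Important gotcha on spying real objects!' cited in the comment above is that
Mockito.when(spy.method(...)) invokes the real method while the stub is being set up,
whereas doReturn(...).when(spy).method(...) does not. A minimal sketch of the
difference, on a plain java.util.List purely for illustration:

  List<String> spy = Mockito.spy(new ArrayList<String>());
  // Mockito.when(spy.get(0)).thenReturn("foo");  // calls the real get(0) on the
  //                                              // empty list -> IndexOutOfBoundsException
  Mockito.doReturn("foo").when(spy).get(0);       // stubs get(0) without invoking it
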
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 3b14b7f..df3088c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -253,7 +253,7 @@ public class TestMetaWithReplicas {
           util.getHBaseClusterInterface().killRegionServer(primary);
           util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000);
         }
-        ((ClusterConnection)c).clearRegionCache();
+        c.clearRegionLocationCache();
       }
       LOG.info("Running GETs");
       Get get = null;
@@ -276,7 +276,7 @@ public class TestMetaWithReplicas {
         util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0);
         util.getHBaseClusterInterface().waitForActiveAndReadyMaster();
         LOG.info("Master active!");
-        ((ClusterConnection)c).clearRegionCache();
+        c.clearRegionLocationCache();
       }
       conf.setBoolean(HConstants.USE_META_REPLICAS, false);
       LOG.info("Running GETs no replicas");
@@ -352,19 +352,24 @@ public class TestMetaWithReplicas {
     };
   }
 
+  private List<HRegionLocation> getMetaRegionLocations() throws IOException {
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+      return locator.getAllRegionLocations();
+    }
+  }
+
   @Nullable
   private String checkMetaLocationAndExplain(int originalReplicaCount)
       throws KeeperException, IOException {
     List<String> metaZnodes = TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes();
     if (metaZnodes.size() == originalReplicaCount) {
-      RegionLocations rl = ((ClusterConnection) TEST_UTIL.getConnection())
-          .locateRegion(TableName.META_TABLE_NAME,
-              HConstants.EMPTY_START_ROW, false, false);
-      for (HRegionLocation location : rl.getRegionLocations()) {
+      List<HRegionLocation> locs = getMetaRegionLocations();
+      for (HRegionLocation location : locs) {
         if (location == null) {
-          return "Null location found in " + rl.toString();
+          return "Null location found in " + locs;
         }
-        if (location.getRegionInfo() == null) {
+        if (location.getRegion() == null) {
           return "Null regionInfo for location " + location;
         }
         if (location.getHostname() == null) {
@@ -387,8 +392,7 @@ public class TestMetaWithReplicas {
   public void testHBaseFsckWithFewerMetaReplicas() throws Exception {
     ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection(
         TEST_UTIL.getConfiguration());
-    RegionLocations rl = c.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
-        false, false);
+    RegionLocations rl = new RegionLocations(getMetaRegionLocations());
     HBaseFsckRepair.closeRegionSilentlyAndWait(c,
         rl.getRegionLocation(1).getServerName(), rl.getRegionLocation(1).getRegionInfo());
     // check that problem exists
@@ -405,8 +409,7 @@ public class TestMetaWithReplicas {
   public void testHBaseFsckWithFewerMetaReplicaZnodes() throws Exception {
     ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection(
         TEST_UTIL.getConfiguration());
-    RegionLocations rl = c.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
-        false, false);
+    RegionLocations rl = new RegionLocations(getMetaRegionLocations());
     HBaseFsckRepair.closeRegionSilentlyAndWait(c,
         rl.getRegionLocation(2).getServerName(), rl.getRegionLocation(2).getRegionInfo());
     ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
@@ -484,17 +487,14 @@ public class TestMetaWithReplicas {
     // can be recovered
     try (ClusterConnection conn = (ClusterConnection)
         ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
-      RegionLocations rl = conn.
-          locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true);
-      HRegionLocation hrl = rl.getRegionLocation(1);
+      HRegionLocation hrl = getMetaRegionLocations().get(1);
       ServerName oldServer = hrl.getServerName();
       TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer);
       int i = 0;
       do {
         LOG.debug("Waiting for the replica " + hrl.getRegionInfo() + " to come up");
         Thread.sleep(10000); //wait for the detection/recovery
-        rl = conn.locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true);
-        hrl = rl.getRegionLocation(1);
+        hrl = getMetaRegionLocations().get(1);
         i++;
       } while ((hrl == null || hrl.getServerName().equals(oldServer)) && i < 3);
       assertTrue(i != 3);
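
The recurring rewrite in this file (and in the files below) swaps the internal
ClusterConnection.locateRegion(...) calls for the public RegionLocator API. A minimal
sketch of the before/after shape, with conn and tableName standing in for the test's
connection and table:

  // Before: RegionLocations rl =
  //   ((ClusterConnection) conn).locateRegion(tableName, row, false /* useCache */, false);
  // After:
  try (RegionLocator locator = conn.getRegionLocator(tableName)) {
    // reload == true bypasses the location cache, mirroring useCache == false above.
    List<HRegionLocation> locs = locator.getRegionLocations(row, true);
    RegionLocations rl = new RegionLocations(locs);
  }
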
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index 59920d6..0929f9c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -670,9 +670,9 @@ public class TestReplicaWithCluster {
   // within configured hbase.client.metaReplicaCallTimeout.scan from primary meta region.
   @Test
   public void testGetRegionLocationFromPrimaryMetaRegion() throws IOException, InterruptedException {
-    HTU.getAdmin().setBalancerRunning(false, true);
+    HTU.getAdmin().balancerSwitch(false, true);
 
-    ((ConnectionImplementation) HTU.getAdmin().getConnection()).setUseMetaReplicas(true);
+    ((ConnectionImplementation) HTU.getConnection()).setUseMetaReplicas(true);
 
     // Create table then get the single region for our new table.
     HTableDescriptor hdt = HTU.createTableDescriptor("testGetRegionLocationFromPrimaryMetaRegion");
@@ -684,13 +684,13 @@ public class TestReplicaWithCluster {
       RegionServerHostingPrimayMetaRegionSlowOrStopCopro.slowDownPrimaryMetaScan = true;
 
       // Get user table location, always get it from the primary meta replica
-      RegionLocations url = ((ClusterConnection) HTU.getConnection())
-          .locateRegion(hdt.getTableName(), row, false, false);
-
+      try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) {
+        locator.getRegionLocations(row, true);
+      }
     } finally {
       RegionServerHostingPrimayMetaRegionSlowOrStopCopro.slowDownPrimaryMetaScan = false;
-      ((ConnectionImplementation) HTU.getAdmin().getConnection()).setUseMetaReplicas(false);
-      HTU.getAdmin().setBalancerRunning(true, true);
+      ((ConnectionImplementation) HTU.getConnection()).setUseMetaReplicas(false);
+      HTU.getAdmin().balancerSwitch(true, true);
       HTU.getAdmin().disableTable(hdt.getTableName());
       HTU.deleteTable(hdt.getTableName());
     }
@@ -703,25 +703,27 @@ public class TestReplicaWithCluster {
   // with the primary meta region.
   @Test
   public void testReplicaGetWithPrimaryAndMetaDown() throws IOException, InterruptedException {
-    HTU.getAdmin().setBalancerRunning(false, true);
+    HTU.getAdmin().balancerSwitch(false, true);
 
-    ((ConnectionImplementation)HTU.getAdmin().getConnection()).setUseMetaReplicas(true);
+    ((ConnectionImplementation)HTU.getConnection()).setUseMetaReplicas(true);
 
     // Create table then get the single region for our new table.
     HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaGetWithPrimaryAndMetaDown");
     hdt.setRegionReplication(2);
     try {
-
       Table table = HTU.createTable(hdt, new byte[][] { f }, null);
-
       // Get Meta location
-      RegionLocations mrl = ((ClusterConnection) HTU.getConnection())
-          .locateRegion(TableName.META_TABLE_NAME,
-              HConstants.EMPTY_START_ROW, false, false);
+      RegionLocations mrl;
+      try (
+          RegionLocator locator = HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+        mrl = new RegionLocations(locator.getRegionLocations(HConstants.EMPTY_START_ROW, true));
+      }
 
       // Get user table location
-      RegionLocations url = ((ClusterConnection) HTU.getConnection())
-          .locateRegion(hdt.getTableName(), row, false, false);
+      RegionLocations url;
+      try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) {
+        url = new RegionLocations(locator.getRegionLocations(row, true));
+      }
 
       // Make sure that user primary region is co-hosted with the meta region
       if (!url.getDefaultRegionLocation().getServerName().equals(
@@ -740,12 +742,15 @@ public class TestReplicaWithCluster {
 
       // Wait until the meta table is updated with new location info
       while (true) {
-        mrl = ((ClusterConnection) HTU.getConnection())
-            .locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, false, false);
+        try (RegionLocator locator =
+          HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+          mrl = new RegionLocations(locator.getRegionLocations(HConstants.EMPTY_START_ROW, true));
+        }
 
         // Get user table location
-        url = ((ClusterConnection) HTU.getConnection())
-            .locateRegion(hdt.getTableName(), row, false, true);
+        try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) {
+          url = new RegionLocations(locator.getRegionLocations(row, true));
+        }
 
         LOG.info("meta locations " + mrl);
         LOG.info("table locations " + url);
@@ -787,9 +792,9 @@ public class TestReplicaWithCluster {
       Assert.assertTrue(r.isStale());
 
     } finally {
-      ((ConnectionImplementation)HTU.getAdmin().getConnection()).setUseMetaReplicas(false);
+      ((ConnectionImplementation)HTU.getConnection()).setUseMetaReplicas(false);
       RegionServerHostingPrimayMetaRegionSlowOrStopCopro.throwException = false;
-      HTU.getAdmin().setBalancerRunning(true, true);
+      HTU.getAdmin().balancerSwitch(true, true);
       HTU.getAdmin().disableTable(hdt.getTableName());
       HTU.deleteTable(hdt.getTableName());
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 311f651..c26c969 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -229,7 +229,7 @@ public class TestReplicasClient {
 
   @Before
   public void before() throws IOException {
-    ((ClusterConnection) HTU.getAdmin().getConnection()).clearRegionCache();
+    HTU.getConnection().clearRegionLocationCache();
     try {
       openRegion(hriPrimary);
     } catch (Exception ignored) {
@@ -250,8 +250,7 @@ public class TestReplicasClient {
       closeRegion(hriPrimary);
     } catch (Exception ignored) {
     }
-
-    ((ClusterConnection) HTU.getAdmin().getConnection()).clearRegionCache();
+    HTU.getConnection().clearRegionLocationCache();
   }
 
   private HRegionServer getRS() {
@@ -329,17 +328,17 @@ public class TestReplicasClient {
   public void testLocations() throws Exception {
     byte[] b1 = "testLocations".getBytes();
     openRegion(hriSecondary);
-    ClusterConnection hc = (ClusterConnection) HTU.getAdmin().getConnection();
+    ConnectionImplementation hc = (ConnectionImplementation) HTU.getConnection();
 
     try {
-      hc.clearRegionCache();
+      hc.clearRegionLocationCache();
       RegionLocations rl = hc.locateRegion(table.getName(), b1, false, false);
       Assert.assertEquals(2, rl.size());
 
       rl = hc.locateRegion(table.getName(), b1, true, false);
       Assert.assertEquals(2, rl.size());
 
-      hc.clearRegionCache();
+      hc.clearRegionLocationCache();
       rl = hc.locateRegion(table.getName(), b1, true, false);
       Assert.assertEquals(2, rl.size());
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
index 026010d..93c6b4d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
@@ -204,7 +204,7 @@ public class TestSeparateClientZKCluster {
   public void testMetaMoveDuringClientZkClusterRestart() throws Exception {
     TableName tn = TableName.valueOf(name.getMethodName());
     // create table
-    ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
+    Connection conn = TEST_UTIL.getConnection();
     Admin admin = conn.getAdmin();
     HTable table = (HTable) conn.getTable(tn);
     try {
@@ -218,7 +218,7 @@ public class TestSeparateClientZKCluster {
       put.addColumn(family, qualifier, value);
       table.put(put);
       // invalid connection cache
-      conn.clearRegionCache();
+      conn.clearRegionLocationCache();
       // stop client zk cluster
       clientZkCluster.shutdown();
       // stop current meta server and confirm the server shutdown process
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
index 0562c90..2f95a90 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
@@ -297,7 +297,7 @@ public class TestSnapshotCloneIndependence {
    */
   private void runTestRegionOperationsIndependent() throws Exception {
     // Verify that region information is the same pre-split
-    ((ClusterConnection) UTIL.getConnection()).clearRegionCache();
+    UTIL.getConnection().clearRegionLocationCache();
     List<HRegionInfo> originalTableHRegions = admin.getTableRegions(originalTableName);
 
     final int originalRegionCount = originalTableHRegions.size();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
index 446c3f9..0c2532e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
@@ -25,8 +25,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -58,32 +59,33 @@ public class TestMetaAssignmentWithStopMaster {
 
   @Test
   public void testStopActiveMaster() throws Exception {
-    ClusterConnection conn =
-        (ClusterConnection) ConnectionFactory.createConnection(UTIL.getConfiguration());
-    ServerName oldMetaServer = conn.locateRegions(TableName.META_TABLE_NAME).get(0).getServerName();
-    ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName();
+    try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration());
+        RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
+      ServerName oldMetaServer = locator.getAllRegionLocations().get(0).getServerName();
+      ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName();
 
-    UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test");
-    long startTime = System.currentTimeMillis();
-    while (UTIL.getMiniHBaseCluster().getMaster() == null || UTIL.getMiniHBaseCluster().getMaster()
-        .getServerName().equals(oldMaster)) {
-      LOG.info("Wait the standby master become active");
-      Thread.sleep(3000);
-      if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
-        fail("Wait too long for standby master become active");
+      UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test");
+      long startTime = System.currentTimeMillis();
+      while (UTIL.getMiniHBaseCluster().getMaster() == null ||
+        UTIL.getMiniHBaseCluster().getMaster().getServerName().equals(oldMaster)) {
+        LOG.info("Wait the standby master become active");
+        Thread.sleep(3000);
+        if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
+          fail("Wait too long for standby master become active");
+        }
       }
-    }
-    startTime = System.currentTimeMillis();
-    while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
-      LOG.info("Wait the new active master to be initialized");
-      Thread.sleep(3000);
-      if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
-        fail("Wait too long for the new active master to be initialized");
+      startTime = System.currentTimeMillis();
+      while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
+        LOG.info("Wait the new active master to be initialized");
+        Thread.sleep(3000);
+        if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
+          fail("Wait too long for the new active master to be initialized");
+        }
       }
-    }
 
-    ServerName newMetaServer = conn.locateRegions(TableName.META_TABLE_NAME).get(0).getServerName();
-    assertTrue("The new meta server " + newMetaServer + " should be same with" +
+      ServerName newMetaServer = locator.getAllRegionLocations().get(0).getServerName();
+      assertTrue("The new meta server " + newMetaServer + " should be same with" +
         " the old meta server " + oldMetaServer, newMetaServer.equals(oldMetaServer));
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
index a4b99a1..fcc1bb8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -366,24 +366,15 @@ public class TestLoadIncrementalHFilesSplitRecovery {
 
   private ClusterConnection getMockedConnection(final Configuration conf)
       throws IOException, org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
-    ClusterConnection c = Mockito.mock(ClusterConnection.class);
-    Mockito.when(c.getConfiguration()).thenReturn(conf);
-    Mockito.doNothing().when(c).close();
-    // Make it so we return a particular location when asked.
-    final HRegionLocation loc = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO,
-        ServerName.valueOf("example.org", 1234, 0));
-    Mockito.when(
-      c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean()))
-        .thenReturn(loc);
-    Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())).thenReturn(loc);
-    ClientProtos.ClientService.BlockingInterface hri =
-        Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+    ServerName sn = ServerName.valueOf("example.org", 1234, 0);
+    RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
+    ClientProtos.ClientService.BlockingInterface client =
+      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
     Mockito
-        .when(
-          hri.bulkLoadHFile((RpcController) Mockito.any(), (BulkLoadHFileRequest) Mockito.any()))
-        .thenThrow(new ServiceException(new IOException("injecting bulk load error")));
-    Mockito.when(c.getClient(Mockito.any())).thenReturn(hri);
-    return c;
+      .when(
+        client.bulkLoadHFile((RpcController) Mockito.any(), (BulkLoadHFileRequest) Mockito.any()))
+      .thenThrow(new ServiceException(new IOException("injecting bulk load error")));
+    return HConnectionTestingUtility.getMockedConnectionAndDecorate(conf, null, client, sn, hri);
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index fc23d51..d25ccef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -307,8 +307,7 @@ public class BaseTestHBaseFsck {
       tbl.close();
       tbl = null;
     }
-
-    ((ClusterConnection) connection).clearRegionCache();
+    connection.clearRegionLocationCache();
     deleteTable(TEST_UTIL, tablename);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 410dd0c..0a66ec0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -19,33 +19,34 @@ package org.apache.hadoop.hbase.util;
 import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.INCREMENT;
 import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.MUTATE_INFO;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
+
 /**
  * Common base class for reader and writer parts of multi-thread HBase load
  * test (See LoadTestTool).
@@ -491,7 +492,6 @@ public abstract class MultiThreadedAction {
   }
 
   private void printLocations(Result r) {
-    RegionLocations rl = null;
     if (r == null) {
       LOG.info("FAILED FOR null Result");
       return;
@@ -500,15 +500,14 @@ public abstract class MultiThreadedAction {
     if (r.getRow() == null) {
       return;
     }
-    try {
-      rl = ((ClusterConnection)connection).locateRegion(tableName, r.getRow(), true, true);
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      List<HRegionLocation> locs = locator.getRegionLocations(r.getRow());
+      for (HRegionLocation h : locs) {
+        LOG.info("LOCATION " + h);
+      }
     } catch (IOException e) {
       LOG.warn("Couldn't get locations for row " + Bytes.toString(r.getRow()));
     }
-    HRegionLocation locations[] = rl.getRegionLocations();
-    for (HRegionLocation h : locations) {
-      LOG.info("LOCATION " + h);
-    }
   }
 
   private String resultToString(Result result) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
index 6864366..27f5fb9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
@@ -21,14 +21,13 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Get;
-
 import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
@@ -377,8 +376,10 @@ public class MultiThreadedReader extends MultiThreadedAction
           numKeysVerified.incrementAndGet();
         }
       } else {
-        HRegionLocation hloc = connection.getRegionLocation(tableName,
-          get.getRow(), false);
+        HRegionLocation hloc;
+        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+          hloc = locator.getRegionLocation(get.getRow());
+        }
         String rowKey = Bytes.toString(get.getRow());
         LOG.info("Key = " + rowKey + ", Region location: " + hloc);
         if(isNullExpected) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
index 54be0d3..1ebc9b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
@@ -27,10 +27,10 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
 import org.slf4j.Logger;
@@ -97,9 +97,9 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction {
 
   protected String getRegionDebugInfoSafe(Table table, byte[] rowKey) {
     HRegionLocation cached = null, real = null;
-    try {
-      cached = connection.getRegionLocation(tableName, rowKey, false);
-      real = connection.getRegionLocation(tableName, rowKey, true);
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      cached = locator.getRegionLocation(rowKey, false);
+      real = locator.getRegionLocation(rowKey, true);
     } catch (Throwable t) {
       // Cannot obtain region information for another catch block - too bad!
     }


[hbase] 03/09: HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit afd8f14d09b96dbc1798337bd6b92622701d4fd2
Author: zhangduo <zh...@apache.org>
AuthorDate: Thu Dec 6 21:25:34 2018 +0800

    HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin
---
 .../hbase/client/AsyncClusterConnection.java       |   6 +
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |   4 +
 .../hbase/client/AsyncRegionServerAdmin.java       | 210 +++++++++++++++++++++
 .../org/apache/hadoop/hbase/util/FutureUtils.java  |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |  15 +-
 .../apache/hadoop/hbase/master/ServerManager.java  |  67 -------
 .../master/procedure/RSProcedureDispatcher.java    |  44 +++--
 7 files changed, 262 insertions(+), 86 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index c7dea25..1327fd7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -27,6 +28,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface AsyncClusterConnection extends AsyncConnection {
 
   /**
+   * Get the admin service for the given region server.
+   */
+  AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName);
+
+  /**
    * Get the nonce generator for this connection.
    */
   NonceGenerator getNonceGenerator();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 81dafaf..8cb19a6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -359,4 +359,8 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
     return new HBaseHbck(MasterProtos.HbckService.newBlockingStub(
       rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout)), rpcControllerFactory);
   }
+
+  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
+    return new AsyncRegionServerAdmin(serverName, this);
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
new file mode 100644
index 0000000..9accd89
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
+
+/**
+ * A simple wrapper of the {@link AdminService} for a region server, which returns a
+ * {@link CompletableFuture}. This is easier to use than the raw protobuf interface, where you
+ * need to get the result from the {@link RpcCallback} and, if there is an exception, fetch it
+ * from the {@link RpcController} passed in.
+ * <p/>
+ * Notice that there is no retry, and this is intentional. We have different retry logic for
+ * different usages for now; if later we want to unify them, we can move the retry logic into
+ * this class.
+ */
+@InterfaceAudience.Private
+public class AsyncRegionServerAdmin {
+
+  private final ServerName server;
+
+  private final AsyncConnectionImpl conn;
+
+  AsyncRegionServerAdmin(ServerName server, AsyncConnectionImpl conn) {
+    this.server = server;
+    this.conn = conn;
+  }
+
+  @FunctionalInterface
+  private interface RpcCall<RESP> {
+    void call(AdminService.Interface stub, HBaseRpcController controller, RpcCallback<RESP> done);
+  }
+
+  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
+    CompletableFuture<RESP> future = new CompletableFuture<>();
+    HBaseRpcController controller = conn.rpcControllerFactory.newController();
+    try {
+      rpcCall.call(conn.getAdminStub(server), controller, new RpcCallback<RESP>() {
+
+        @Override
+        public void run(RESP resp) {
+          if (controller.failed()) {
+            future.completeExceptionally(controller.getFailed());
+          } else {
+            future.complete(resp);
+          }
+        }
+      });
+    } catch (IOException e) {
+      future.completeExceptionally(e);
+    }
+    return future;
+  }
+
+  public CompletableFuture<GetRegionInfoResponse> getRegionInfo(GetRegionInfoRequest request) {
+    return call((stub, controller, done) -> stub.getRegionInfo(controller, request, done));
+  }
+
+  public CompletableFuture<GetStoreFileResponse> getStoreFile(GetStoreFileRequest request) {
+    return call((stub, controller, done) -> stub.getStoreFile(controller, request, done));
+  }
+
+  public CompletableFuture<GetOnlineRegionResponse> getOnlineRegion(
+      GetOnlineRegionRequest request) {
+    return call((stub, controller, done) -> stub.getOnlineRegion(controller, request, done));
+  }
+
+  public CompletableFuture<OpenRegionResponse> openRegion(OpenRegionRequest request) {
+    return call((stub, controller, done) -> stub.openRegion(controller, request, done));
+  }
+
+  public CompletableFuture<WarmupRegionResponse> warmupRegion(WarmupRegionRequest request) {
+    return call((stub, controller, done) -> stub.warmupRegion(controller, request, done));
+  }
+
+  public CompletableFuture<CloseRegionResponse> closeRegion(CloseRegionRequest request) {
+    return call((stub, controller, done) -> stub.closeRegion(controller, request, done));
+  }
+
+  public CompletableFuture<FlushRegionResponse> flushRegion(FlushRegionRequest request) {
+    return call((stub, controller, done) -> stub.flushRegion(controller, request, done));
+  }
+
+  public CompletableFuture<CompactionSwitchResponse> compactionSwitch(
+      CompactionSwitchRequest request) {
+    return call((stub, controller, done) -> stub.compactionSwitch(controller, request, done));
+  }
+
+  public CompletableFuture<CompactRegionResponse> compactRegion(CompactRegionRequest request) {
+    return call((stub, controller, done) -> stub.compactRegion(controller, request, done));
+  }
+
+  public CompletableFuture<ReplicateWALEntryResponse> replicateWALEntry(
+      ReplicateWALEntryRequest request) {
+    return call((stub, controller, done) -> stub.replicateWALEntry(controller, request, done));
+  }
+
+  public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request) {
+    return call((stub, controller, done) -> stub.replay(controller, request, done));
+  }
+
+  public CompletableFuture<RollWALWriterResponse> rollWALWriter(RollWALWriterRequest request) {
+    return call((stub, controller, done) -> stub.rollWALWriter(controller, request, done));
+  }
+
+  public CompletableFuture<GetServerInfoResponse> getServerInfo(GetServerInfoRequest request) {
+    return call((stub, controller, done) -> stub.getServerInfo(controller, request, done));
+  }
+
+  public CompletableFuture<StopServerResponse> stopServer(StopServerRequest request) {
+    return call((stub, controller, done) -> stub.stopServer(controller, request, done));
+  }
+
+  public CompletableFuture<UpdateFavoredNodesResponse> updateFavoredNodes(
+      UpdateFavoredNodesRequest request) {
+    return call((stub, controller, done) -> stub.updateFavoredNodes(controller, request, done));
+  }
+
+  public CompletableFuture<UpdateConfigurationResponse> updateConfiguration(
+      UpdateConfigurationRequest request) {
+    return call((stub, controller, done) -> stub.updateConfiguration(controller, request, done));
+  }
+
+  public CompletableFuture<GetRegionLoadResponse> getRegionLoad(GetRegionLoadRequest request) {
+    return call((stub, controller, done) -> stub.getRegionLoad(controller, request, done));
+  }
+
+  public CompletableFuture<ClearCompactionQueuesResponse> clearCompactionQueues(
+      ClearCompactionQueuesRequest request) {
+    return call((stub, controller, done) -> stub.clearCompactionQueues(controller, request, done));
+  }
+
+  public CompletableFuture<ClearRegionBlockCacheResponse> clearRegionBlockCache(
+      ClearRegionBlockCacheRequest request) {
+    return call((stub, controller, done) -> stub.clearRegionBlockCache(controller, request, done));
+  }
+
+  public CompletableFuture<GetSpaceQuotaSnapshotsResponse> getSpaceQuotaSnapshots(
+      GetSpaceQuotaSnapshotsRequest request) {
+    return call((stub, controller, done) -> stub.getSpaceQuotaSnapshots(controller, request, done));
+  }
+
+  public CompletableFuture<ExecuteProceduresResponse> executeProcedures(
+      ExecuteProceduresRequest request) {
+    return call((stub, controller, done) -> stub.executeProcedures(controller, request, done));
+  }
+}
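
A minimal sketch of the intended call pattern, combining the getServerInfo method above
with the FutureUtils.addListener helper used later in this commit (conn, server and LOG
are stand-ins for the caller's fields):

  AsyncRegionServerAdmin rsAdmin = conn.getRegionServerAdmin(server);
  FutureUtils.addListener(
    rsAdmin.getServerInfo(GetServerInfoRequest.getDefaultInstance()),
    (resp, err) -> {
      if (err != null) {
        LOG.warn("getServerInfo failed for {}", server, err);
      } else {
        LOG.info("Server info from {}: {}", server, resp.getServerInfo());
      }
    });

Since the wrapper deliberately does no retry (per the class comment), any retry policy
stays with the caller.
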
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
index 861dacb..38cd952 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
@@ -134,4 +134,4 @@ public final class FutureUtils {
       throw new IOException(cause);
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 7579fd5..cf56c4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -195,6 +195,7 @@ import org.apache.hadoop.hbase.util.BloomFilterUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EncryptionTest;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -227,6 +228,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
@@ -1955,6 +1957,15 @@ public class HMaster extends HRegionServer implements MasterServices {
     });
   }
 
+  private void warmUpRegion(ServerName server, RegionInfo region) {
+    FutureUtils.addListener(asyncClusterConnection.getRegionServerAdmin(server)
+      .warmupRegion(RequestConverter.buildWarmupRegionRequest(region)), (r, e) -> {
+        if (e != null) {
+          LOG.warn("Failed to warm up region {} on server {}", region, server, e);
+        }
+      });
+  }
+
   // Public so can be accessed by tests. Blocks until move is done.
   // Replace with an async implementation from which you can get
   // a success/failure result.
@@ -2026,7 +2037,9 @@ public class HMaster extends HRegionServer implements MasterServices {
       // Warmup the region on the destination before initiating the move. this call
       // is synchronous and takes some time. doing it before the source region gets
       // closed
-      serverManager.sendRegionWarmup(rp.getDestination(), hri);
+      // A region server could reject the warmup request because it either does not
+      // have the specified region or the region is being split.
+      warmUpRegion(rp.getDestination(), hri);
 
       LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
       Future<byte []> future = this.assignmentManager.moveAsync(rp);
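
The warmUpRegion() helper above fires the RPC and reacts through a completion listener rather than blocking the move. A bare-bones sketch of that listener idiom in plain java.util.concurrent terms (illustrative names only; this is not the hbase-common FutureUtils implementation):

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public final class ListenerSketch {

  private ListenerSketch() {
  }

  /** Invoke action once the future completes, successfully or not. */
  public static <T> void addListener(CompletableFuture<T> future,
      BiConsumer<? super T, ? super Throwable> action) {
    // whenComplete hands the listener (result, null) on success and
    // (null, error) on failure, matching the (r, e) lambda above.
    future.whenComplete(action);
  }

  public static void main(String[] args) {
    CompletableFuture<Void> rpc = CompletableFuture.completedFuture(null);
    addListener(rpc, (r, e) -> {
      if (e != null) {
        System.err.println("Failed to warm up region: " + e);
      }
    });
  }
}
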
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 0fb1551..a8d5e21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -51,12 +50,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -159,25 +155,16 @@ public class ServerManager {
   private final ConcurrentNavigableMap<ServerName, ServerMetrics> onlineServers =
     new ConcurrentSkipListMap<>();
 
-  /**
-   * Map of admin interfaces per registered regionserver; these interfaces we use to control
-   * regionservers out on the cluster
-   */
-  private final Map<ServerName, AdminService.BlockingInterface> rsAdmins = new HashMap<>();
-
   /** List of region servers that should not get any more new regions. */
   private final ArrayList<ServerName> drainingServers = new ArrayList<>();
 
   private final MasterServices master;
-  private final ClusterConnection connection;
 
   private final DeadServer deadservers = new DeadServer();
 
   private final long maxSkew;
   private final long warningSkew;
 
-  private final RpcControllerFactory rpcControllerFactory;
-
   /** Listeners that are called on server events. */
   private List<ServerListener> listeners = new CopyOnWriteArrayList<>();
 
@@ -189,8 +176,6 @@ public class ServerManager {
     Configuration c = master.getConfiguration();
     maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
     warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
-    this.connection = master.getClusterConnection();
-    this.rpcControllerFactory = this.connection == null? null: connection.getRpcControllerFactory();
     persistFlushedSequenceId = c.getBoolean(PERSIST_FLUSHEDSEQUENCEID,
         PERSIST_FLUSHEDSEQUENCEID_DEFAULT);
   }
@@ -438,7 +423,6 @@ public class ServerManager {
   void recordNewServerWithLock(final ServerName serverName, final ServerMetrics sl) {
     LOG.info("Registering regionserver=" + serverName);
     this.onlineServers.put(serverName, sl);
-    this.rsAdmins.remove(serverName);
   }
 
   @VisibleForTesting
@@ -633,7 +617,6 @@ public class ServerManager {
       this.onlineServers.remove(sn);
       onlineServers.notifyAll();
     }
-    this.rsAdmins.remove(sn);
   }
 
   /*
@@ -676,34 +659,6 @@ public class ServerManager {
     return this.drainingServers.add(sn);
   }
 
-  // RPC methods to region servers
-
-  private HBaseRpcController newRpcController() {
-    return rpcControllerFactory == null ? null : rpcControllerFactory.newController();
-  }
-
-  /**
-   * Sends a WARMUP RPC to the specified server to warmup the specified region.
-   * <p>
-   * A region server could reject the close request because it either does not
-   * have the specified region or the region is being split.
-   * @param server server to warmup a region
-   * @param region region to  warmup
-   */
-  public void sendRegionWarmup(ServerName server,
-      RegionInfo region) {
-    if (server == null) return;
-    try {
-      AdminService.BlockingInterface admin = getRsAdmin(server);
-      HBaseRpcController controller = newRpcController();
-      ProtobufUtil.warmupRegion(controller, admin, region);
-    } catch (IOException e) {
-      LOG.error("Received exception in RPC for warmup server:" +
-        server + "region: " + region +
-        "exception: " + e);
-    }
-  }
-
   /**
    * Contacts a region server and waits up to timeout ms
    * to close the region.  This bypasses the active hmaster.
@@ -737,28 +692,6 @@ public class ServerManager {
   }
 
   /**
-   * @param sn
-   * @return Admin interface for the remote regionserver named <code>sn</code>
-   * @throws IOException
-   * @throws RetriesExhaustedException wrapping a ConnectException if failed
-   */
-  public AdminService.BlockingInterface getRsAdmin(final ServerName sn)
-  throws IOException {
-    AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
-    if (admin == null) {
-      LOG.debug("New admin connection to " + sn.toString());
-      if (sn.equals(master.getServerName()) && master instanceof HRegionServer) {
-        // A master is also a region server now, see HBASE-10569 for details
-        admin = ((HRegionServer)master).getRSRpcServices();
-      } else {
-        admin = this.connection.getAdmin(sn);
-      }
-      this.rsAdmins.put(sn, admin);
-    }
-    return admin;
-  }
-
-  /**
    * Calculate min necessary to start. This is not an absolute. It is just
    * a friction that will cause us to hang around a bit longer waiting on
    * RegionServers to check-in.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 638f9d3..f3ab4b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -18,12 +18,15 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
@@ -37,11 +40,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -159,13 +162,8 @@ public class RSProcedureDispatcher
       this.serverName = serverName;
     }
 
-    protected AdminService.BlockingInterface getRsAdmin() throws IOException {
-      final AdminService.BlockingInterface admin = master.getServerManager().getRsAdmin(serverName);
-      if (admin == null) {
-        throw new IOException("Attempting to send OPEN RPC to server " + getServerName() +
-          " failed because no RPC connection found to this server");
-      }
-      return admin;
+    protected AsyncRegionServerAdmin getRsAdmin() throws IOException {
+      return master.getAsyncClusterConnection().getRegionServerAdmin(serverName);
     }
 
     protected ServerName getServerName() {
@@ -344,9 +342,13 @@ public class RSProcedureDispatcher
     protected ExecuteProceduresResponse sendRequest(final ServerName serverName,
         final ExecuteProceduresRequest request) throws IOException {
       try {
-        return getRsAdmin().executeProcedures(null, request);
-      } catch (ServiceException se) {
-        throw ProtobufUtil.getRemoteException(se);
+        return getRsAdmin().executeProcedures(request).get();
+      } catch (InterruptedException e) {
+        throw (IOException) new InterruptedIOException().initCause(e);
+      } catch (ExecutionException e) {
+        Throwable cause = e.getCause();
+        Throwables.propagateIfPossible(cause, IOException.class);
+        throw new IOException(cause);
       }
     }
 
@@ -407,9 +409,13 @@ public class RSProcedureDispatcher
     private OpenRegionResponse sendRequest(final ServerName serverName,
         final OpenRegionRequest request) throws IOException {
       try {
-        return getRsAdmin().openRegion(null, request);
-      } catch (ServiceException se) {
-        throw ProtobufUtil.getRemoteException(se);
+        return getRsAdmin().openRegion(request).get();
+      } catch (InterruptedException e) {
+        throw (IOException) new InterruptedIOException().initCause(e);
+      } catch (ExecutionException e) {
+        Throwable cause = e.getCause();
+        Throwables.propagateIfPossible(cause, IOException.class);
+        throw new IOException(cause);
       }
     }
 
@@ -453,9 +459,13 @@ public class RSProcedureDispatcher
     private CloseRegionResponse sendRequest(final ServerName serverName,
         final CloseRegionRequest request) throws IOException {
       try {
-        return getRsAdmin().closeRegion(null, request);
-      } catch (ServiceException se) {
-        throw ProtobufUtil.getRemoteException(se);
+        return getRsAdmin().closeRegion(request).get();
+      } catch (InterruptedException e) {
+        throw (IOException) new InterruptedIOException().initCause(e);
+      } catch (ExecutionException e) {
+        Throwable cause = e.getCause();
+        Throwables.propagateIfPossible(cause, IOException.class);
+        throw new IOException(cause);
       }
     }
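
All three sendRequest() overrides above repeat the same get()-and-unwrap idiom to keep the dispatcher's blocking contract while the underlying admin is async. A minimal self-contained sketch of that idiom (class and method names are illustrative; FutureUtils in hbase-common fills this role elsewhere in this series):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public final class BlockingFutures {

  private BlockingFutures() {
  }

  /**
   * Block on an async RPC future and rethrow any failure as an IOException,
   * mirroring the catch blocks in the sendRequest() methods above.
   */
  public static <T> T get(CompletableFuture<T> future) throws IOException {
    try {
      return future.get();
    } catch (InterruptedException e) {
      // Restore the interrupt status before converting to an IO failure.
      Thread.currentThread().interrupt();
      throw (IOException) new InterruptedIOException().initCause(e);
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException(cause);
    }
  }
}
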
 


[hbase] 02/09: HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 75b670da99db75300d48982dfc1699dba5cbecdb
Author: zhangduo <zh...@apache.org>
AuthorDate: Sat Dec 1 21:15:48 2018 +0800

    HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager
---
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |  5 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  7 ++-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  2 +-
 .../hbase/regionserver/SecureBulkLoadManager.java  | 24 ++++-----
 .../hadoop/hbase/security/token/TokenUtil.java     | 57 +++++++++++++++++-----
 .../hadoop/hbase/security/token/TestTokenUtil.java | 42 ++++++++++++----
 6 files changed, 96 insertions(+), 41 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index a3d49b5..d9e620b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -261,13 +261,12 @@ public final class ProtobufUtil {
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because we are trying to
    * contain direct protobuf references.
-   * @param e
    */
-  public static IOException handleRemoteException(Exception e) {
+  public static IOException handleRemoteException(Throwable e) {
     return makeIOExceptionOfException(e);
   }
 
-  private static IOException makeIOExceptionOfException(Exception e) {
+  private static IOException makeIOExceptionOfException(Throwable e) {
     Throwable t = e;
     if (e instanceof ServiceException ||
         e instanceof org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index fea81f1..de2fb7d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -40,7 +40,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ByteBufferExtendedCell;
@@ -123,6 +122,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Service;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -343,13 +343,12 @@ public final class ProtobufUtil {
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because we are trying to
    * contain direct protobuf references.
-   * @param e
    */
-  public static IOException handleRemoteException(Exception e) {
+  public static IOException handleRemoteException(Throwable e) {
     return makeIOExceptionOfException(e);
   }
 
-  private static IOException makeIOExceptionOfException(Exception e) {
+  private static IOException makeIOExceptionOfException(Throwable e) {
     Throwable t = e;
     if (e instanceof ServiceException) {
       t = e.getCause();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index dbc5e77..2e54907 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1937,7 +1937,7 @@ public class HRegionServer extends HasThread implements
     if (!isStopped() && !isAborted()) {
       initializeThreads();
     }
-    this.secureBulkLoadManager = new SecureBulkLoadManager(this.conf, clusterConnection);
+    this.secureBulkLoadManager = new SecureBulkLoadManager(this.conf, asyncClusterConnection);
     this.secureBulkLoadManager.start();
 
     // Health checker thread.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index 566a6b6..add6519 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,7 +28,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.BiFunction;
 import java.util.function.Consumer;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,11 +37,12 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
 import org.apache.hadoop.hbase.security.token.TokenUtil;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -56,7 +56,9 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
@@ -111,9 +113,9 @@ public class SecureBulkLoadManager {
 
   private UserProvider userProvider;
   private ConcurrentHashMap<UserGroupInformation, Integer> ugiReferenceCounter;
-  private Connection conn;
+  private AsyncConnection conn;
 
-  SecureBulkLoadManager(Configuration conf, Connection conn) {
+  SecureBulkLoadManager(Configuration conf, AsyncConnection conn) {
     this.conf = conf;
     this.conn = conn;
   }
@@ -212,23 +214,23 @@ public class SecureBulkLoadManager {
       familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath()));
     }
 
-    Token userToken = null;
+    Token<AuthenticationTokenIdentifier> userToken = null;
     if (userProvider.isHadoopSecurityEnabled()) {
-      userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), request.getFsToken()
-              .getPassword().toByteArray(), new Text(request.getFsToken().getKind()), new Text(
-              request.getFsToken().getService()));
+      userToken = new Token<>(request.getFsToken().getIdentifier().toByteArray(),
+        request.getFsToken().getPassword().toByteArray(), new Text(request.getFsToken().getKind()),
+        new Text(request.getFsToken().getService()));
     }
     final String bulkToken = request.getBulkToken();
     User user = getActiveUser();
     final UserGroupInformation ugi = user.getUGI();
     if (userProvider.isHadoopSecurityEnabled()) {
       try {
-        Token tok = TokenUtil.obtainToken(conn);
+        Token<AuthenticationTokenIdentifier> tok = TokenUtil.obtainToken(conn).get();
         if (tok != null) {
           boolean b = ugi.addToken(tok);
           LOG.debug("token added " + tok + " for user " + ugi + " return=" + b);
         }
-      } catch (IOException ioe) {
+      } catch (Exception ioe) {
         LOG.warn("unable to add token", ioe);
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
index c54d905..28efb84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,27 +15,29 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.security.token;
 
+import com.google.protobuf.ByteString;
+import com.google.protobuf.ServiceException;
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.PrivilegedExceptionAction;
-
-import com.google.protobuf.ByteString;
-import com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.AsyncTable;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.AuthenticationService;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenResponse;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.Job;
@@ -45,6 +47,8 @@ import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
 /**
  * Utility methods for obtaining authentication tokens.
  */
@@ -64,12 +68,39 @@ public class TokenUtil {
 
   /**
    * Obtain and return an authentication token for the current user.
+   * @param conn The async HBase cluster connection
+   * @return the authentication token instance, wrapped by a {@link CompletableFuture}.
+   */
+  public static CompletableFuture<Token<AuthenticationTokenIdentifier>> obtainToken(
+      AsyncConnection conn) {
+    CompletableFuture<Token<AuthenticationTokenIdentifier>> future = new CompletableFuture<>();
+    if (injectedException != null) {
+      future.completeExceptionally(injectedException);
+      return future;
+    }
+    AsyncTable<?> table = conn.getTable(TableName.META_TABLE_NAME);
+    table.<AuthenticationService.Interface, GetAuthenticationTokenResponse> coprocessorService(
+      AuthenticationProtos.AuthenticationService::newStub,
+      (s, c, r) -> s.getAuthenticationToken(c,
+        AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance(), r),
+      HConstants.EMPTY_START_ROW).whenComplete((resp, error) -> {
+        if (error != null) {
+          future.completeExceptionally(ProtobufUtil.handleRemoteException(error));
+        } else {
+          future.complete(toToken(resp.getToken()));
+        }
+      });
+    return future;
+  }
+
+  /**
+   * Obtain and return an authentication token for the current user.
    * @param conn The HBase cluster connection
    * @throws IOException if a remote error or serialization problem occurs.
    * @return the authentication token instance
    */
-  public static Token<AuthenticationTokenIdentifier> obtainToken(
-      Connection conn) throws IOException {
+  public static Token<AuthenticationTokenIdentifier> obtainToken(Connection conn)
+      throws IOException {
     Table meta = null;
     try {
       injectFault();
@@ -77,9 +108,9 @@ public class TokenUtil {
       meta = conn.getTable(TableName.META_TABLE_NAME);
       CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
       AuthenticationProtos.AuthenticationService.BlockingInterface service =
-          AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
-      AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
-          AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
+        AuthenticationService.newBlockingStub(rpcChannel);
+      GetAuthenticationTokenResponse response =
+        service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance());
 
       return toToken(response.getToken());
     } catch (ServiceException se) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenUtil.java
index 32fcddb..585a3ec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenUtil.java
@@ -18,35 +18,53 @@
 package org.apache.hadoop.hbase.security.token;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.net.URL;
 import java.net.URLClassLoader;
-
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 @Category(SmallTests.class)
 public class TestTokenUtil {
+
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestTokenUtil.class);
+    HBaseClassTestRule.forClass(TestTokenUtil.class);
 
-  @Test
-  public void testObtainToken() throws Exception {
+  private URLClassLoader cl;
+
+  @Before
+  public void setUp() {
     URL urlPU = ProtobufUtil.class.getProtectionDomain().getCodeSource().getLocation();
     URL urlTU = TokenUtil.class.getProtectionDomain().getCodeSource().getLocation();
+    cl = new URLClassLoader(new URL[] { urlPU, urlTU }, getClass().getClassLoader());
+  }
 
-    ClassLoader cl = new URLClassLoader(new URL[] { urlPU, urlTU }, getClass().getClassLoader());
+  @After
+  public void tearDown() throws IOException {
+    Closeables.close(cl, true);
+  }
 
+  @Test
+  public void testObtainToken() throws Exception {
     Throwable injected = new com.google.protobuf.ServiceException("injected");
 
     Class<?> tokenUtil = cl.loadClass(TokenUtil.class.getCanonicalName());
@@ -55,8 +73,7 @@ public class TestTokenUtil {
     shouldInjectFault.set(null, injected);
 
     try {
-      tokenUtil.getMethod("obtainToken", Connection.class)
-          .invoke(null, new Object[] { null });
+      tokenUtil.getMethod("obtainToken", Connection.class).invoke(null, new Object[] { null });
       fail("Should have injected exception.");
     } catch (InvocationTargetException e) {
       Throwable t = e;
@@ -72,9 +89,16 @@ public class TestTokenUtil {
       }
     }
 
+    CompletableFuture<?> future = (CompletableFuture<?>) tokenUtil
+      .getMethod("obtainToken", AsyncConnection.class).invoke(null, new Object[] { null });
+    try {
+      future.get();
+      fail("Should have injected exception.");
+    } catch (ExecutionException e) {
+      assertSame(injected, e.getCause());
+    }
     Boolean loaded = (Boolean) cl.loadClass(ProtobufUtil.class.getCanonicalName())
-        .getDeclaredMethod("isClassLoaderLoaded")
-        .invoke(null);
+      .getDeclaredMethod("isClassLoaderLoaded").invoke(null);
     assertFalse("Should not have loaded DynamicClassLoader", loaded);
   }
 }
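
With obtainToken(AsyncConnection) in place, callers can block or chain as needed. A hypothetical usage sketch assembled only from the signatures in this commit; the class name and UGI handling are illustrative, not HBase code:

import java.util.concurrent.ExecutionException;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.hbase.security.token.TokenUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class ObtainTokenSketch {

  /** Fetch an authentication token and attach it to the given UGI. */
  static void attachToken(AsyncConnection conn, UserGroupInformation ugi)
      throws InterruptedException, ExecutionException {
    // The future completes exceptionally on RPC failure, so get()
    // surfaces the error as an ExecutionException, as the test above checks.
    Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(conn).get();
    if (token != null) {
      ugi.addToken(token);
    }
  }
}
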


[hbase] 08/09: HBASE-21719 Rewrite RegionPlacementMaintainer to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d2ef2ad5315813fdfb6a9be1b51e44a63cd93305
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Jan 15 11:43:41 2019 +0800

    HBASE-21719 Rewrite RegionPlacementMaintainer to use AsyncClusterConnection
    
    Signed-off-by: Michael Stack <st...@apache.org>
---
 .../hbase/master/RegionPlacementMaintainer.java    | 225 +++++++++++----------
 1 file changed, 113 insertions(+), 112 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index faf5e4a..fda0a9c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -1,5 +1,4 @@
 /**
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.text.DecimalFormat;
 import java.util.ArrayList;
@@ -39,29 +38,30 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.MunkresAssignment;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
 
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
 
@@ -71,7 +71,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavor
  */
 @InterfaceAudience.Private
 // TODO: Remove? Unused. Partially implemented only.
-public class RegionPlacementMaintainer {
+public class RegionPlacementMaintainer implements Closeable {
   private static final Logger LOG = LoggerFactory.getLogger(RegionPlacementMaintainer.class
       .getName());
   //The cost of a placement that should never be assigned.
@@ -96,9 +96,9 @@ public class RegionPlacementMaintainer {
   private final boolean enforceMinAssignmentMove;
   private RackManager rackManager;
   private Set<TableName> targetTableSet;
-  private final Connection connection;
+  private AsyncClusterConnection connection;
 
-  public RegionPlacementMaintainer(Configuration conf) {
+  public RegionPlacementMaintainer(Configuration conf) throws IOException {
     this(conf, true, true);
   }
 
@@ -109,11 +109,6 @@ public class RegionPlacementMaintainer {
     this.enforceMinAssignmentMove = enforceMinAssignmentMove;
     this.targetTableSet = new HashSet<>();
     this.rackManager = new RackManager(conf);
-    try {
-      this.connection = ConnectionFactory.createConnection(this.conf);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
   }
 
   private static void printHelp(Options opt) {
@@ -124,6 +119,14 @@ public class RegionPlacementMaintainer {
         " [-fs hdfs://a.b.c.d:9000] [-hbase_root /HBASE]", opt);
   }
 
+  private AsyncClusterConnection getConnection() throws IOException {
+    if (connection == null) {
+      connection =
+        ClusterConnectionFactory.createAsyncClusterConnection(this.conf, null, User.getCurrent());
+    }
+    return connection;
+  }
+
   public void setTargetTableName(String[] tableNames) {
     if (tableNames != null) {
       for (String table : tableNames)
@@ -133,10 +136,8 @@ public class RegionPlacementMaintainer {
 
   /**
    * @return the new RegionAssignmentSnapshot
-   * @throws IOException
    */
-  public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot()
-  throws IOException {
+  public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot() throws IOException {
     SnapshotOfRegionAssignmentFromMeta currentAssignmentShapshot =
       new SnapshotOfRegionAssignmentFromMeta(ConnectionFactory.createConnection(conf));
     currentAssignmentShapshot.initialize();
@@ -145,9 +146,6 @@ public class RegionPlacementMaintainer {
 
   /**
    * Verify the region placement is consistent with the assignment plan
-   * @param isDetailMode
-   * @return reports
-   * @throws IOException
    */
   public List<AssignmentVerificationReport> verifyRegionPlacement(boolean isDetailMode)
       throws IOException {
@@ -206,10 +204,9 @@ public class RegionPlacementMaintainer {
 
     // Get the all the region servers
     List<ServerName> servers = new ArrayList<>();
-    try (Admin admin = this.connection.getAdmin()) {
-      servers.addAll(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+    servers.addAll(
+      FutureUtils.get(getConnection().getAdmin().getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)))
         .getLiveServerMetrics().keySet());
-    }
 
     LOG.info("Start to generate assignment plan for " + numRegions +
         " regions from table " + tableName + " with " +
@@ -492,6 +489,11 @@ public class RegionPlacementMaintainer {
     return plan;
   }
 
+  @Override
+  public void close() throws IOException {
+    Closeables.close(connection, true);
+  }
+
   /**
    * Some algorithms for solving the assignment problem may traverse workers or
    * jobs in linear order which may result in skewing the assignments of the
@@ -690,19 +692,17 @@ public class RegionPlacementMaintainer {
         }
         if (singleServerPlan != null) {
           // Update the current region server with its updated favored nodes
-          BlockingInterface currentRegionServer =
-            ((ClusterConnection)this.connection).getAdmin(entry.getKey());
+          AsyncRegionServerAdmin rsAdmin = getConnection().getRegionServerAdmin(entry.getKey());
           UpdateFavoredNodesRequest request =
-              RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
-
+            RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
           UpdateFavoredNodesResponse updateFavoredNodesResponse =
-              currentRegionServer.updateFavoredNodes(null, request);
+            FutureUtils.get(rsAdmin.updateFavoredNodes(request));
           LOG.info("Region server " +
-              ProtobufUtil.getServerInfo(null, currentRegionServer).getServerName() +
-              " has updated " + updateFavoredNodesResponse.getResponse() + " / " +
-              singleServerPlan.getAssignmentMap().size() +
-              " regions with the assignment plan");
-          succeededNum ++;
+            FutureUtils.get(rsAdmin.getServerInfo(RequestConverter.buildGetServerInfoRequest()))
+              .getServerInfo() +
+            " has updated " + updateFavoredNodesResponse.getResponse() + " / " +
+            singleServerPlan.getAssignmentMap().size() + " regions with the assignment plan");
+          succeededNum++;
         }
       } catch (Exception e) {
         failedUpdateMap.put(entry.getKey(), e);
@@ -719,7 +719,7 @@ public class RegionPlacementMaintainer {
           " region servers with its corresponding favored nodes");
       for (Map.Entry<ServerName, Exception> entry :
         failedUpdateMap.entrySet() ) {
-        LOG.error("Failed to update " + entry.getKey().getHostAndPort() +
+        LOG.error("Failed to update " + entry.getKey().getAddress() +
             " because of " + entry.getValue().getMessage());
       }
     }
@@ -1019,93 +1019,94 @@ public class RegionPlacementMaintainer {
       }
 
       // Create the region placement obj
-      RegionPlacementMaintainer rp = new RegionPlacementMaintainer(conf, enforceLocality,
-          enforceMinAssignmentMove);
+      try (RegionPlacementMaintainer rp =
+        new RegionPlacementMaintainer(conf, enforceLocality, enforceMinAssignmentMove)) {
 
-      if (cmd.hasOption("d") || cmd.hasOption("verification-details")) {
-        verificationDetails = true;
-      }
-
-      if (cmd.hasOption("tables")) {
-        String tableNameListStr = cmd.getOptionValue("tables");
-        String[] tableNames = StringUtils.split(tableNameListStr, ",");
-        rp.setTargetTableName(tableNames);
-      }
+        if (cmd.hasOption("d") || cmd.hasOption("verification-details")) {
+          verificationDetails = true;
+        }
 
-      if (cmd.hasOption("munkres")) {
-        USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = true;
-      }
+        if (cmd.hasOption("tables")) {
+          String tableNameListStr = cmd.getOptionValue("tables");
+          String[] tableNames = StringUtils.split(tableNameListStr, ",");
+          rp.setTargetTableName(tableNames);
+        }
 
-      // Read all the modes
-      if (cmd.hasOption("v") || cmd.hasOption("verify")) {
-        // Verify the region placement.
-        rp.verifyRegionPlacement(verificationDetails);
-      } else if (cmd.hasOption("n") || cmd.hasOption("dry-run")) {
-        // Generate the assignment plan only without updating the hbase:meta and RS
-        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
-        printAssignmentPlan(plan);
-      } else if (cmd.hasOption("w") || cmd.hasOption("write")) {
-        // Generate the new assignment plan
-        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
-        // Print the new assignment plan
-        printAssignmentPlan(plan);
-        // Write the new assignment plan to META
-        rp.updateAssignmentPlanToMeta(plan);
-      } else if (cmd.hasOption("u") || cmd.hasOption("update")) {
-        // Generate the new assignment plan
-        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
-        // Print the new assignment plan
-        printAssignmentPlan(plan);
-        // Update the assignment to hbase:meta and Region Servers
-        rp.updateAssignmentPlan(plan);
-      } else if (cmd.hasOption("diff")) {
-        FavoredNodesPlan newPlan = rp.getNewAssignmentPlan();
-        Map<String, Map<String, Float>> locality = FSUtils
-            .getRegionDegreeLocalityMappingFromFS(conf);
-        Map<TableName, Integer> movesPerTable = rp.getRegionsMovement(newPlan);
-        rp.checkDifferencesWithOldPlan(movesPerTable, locality, newPlan);
-        System.out.println("Do you want to update the assignment plan? [y/n]");
-        Scanner s = new Scanner(System.in);
-        String input = s.nextLine().trim();
-        if (input.equals("y")) {
-          System.out.println("Updating assignment plan...");
-          rp.updateAssignmentPlan(newPlan);
+        if (cmd.hasOption("munkres")) {
+          USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = true;
         }
-        s.close();
-      } else if (cmd.hasOption("ld")) {
-        Map<String, Map<String, Float>> locality = FSUtils
-            .getRegionDegreeLocalityMappingFromFS(conf);
-        rp.printLocalityAndDispersionForCurrentPlan(locality);
-      } else if (cmd.hasOption("p") || cmd.hasOption("print")) {
-        FavoredNodesPlan plan = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan();
-        printAssignmentPlan(plan);
-      } else if (cmd.hasOption("overwrite")) {
-        if (!cmd.hasOption("f") || !cmd.hasOption("r")) {
-          throw new IllegalArgumentException("Please specify: " +
+
+        // Read all the modes
+        if (cmd.hasOption("v") || cmd.hasOption("verify")) {
+          // Verify the region placement.
+          rp.verifyRegionPlacement(verificationDetails);
+        } else if (cmd.hasOption("n") || cmd.hasOption("dry-run")) {
+          // Generate the assignment plan only without updating the hbase:meta and RS
+          FavoredNodesPlan plan = rp.getNewAssignmentPlan();
+          printAssignmentPlan(plan);
+        } else if (cmd.hasOption("w") || cmd.hasOption("write")) {
+          // Generate the new assignment plan
+          FavoredNodesPlan plan = rp.getNewAssignmentPlan();
+          // Print the new assignment plan
+          printAssignmentPlan(plan);
+          // Write the new assignment plan to META
+          rp.updateAssignmentPlanToMeta(plan);
+        } else if (cmd.hasOption("u") || cmd.hasOption("update")) {
+          // Generate the new assignment plan
+          FavoredNodesPlan plan = rp.getNewAssignmentPlan();
+          // Print the new assignment plan
+          printAssignmentPlan(plan);
+          // Update the assignment to hbase:meta and Region Servers
+          rp.updateAssignmentPlan(plan);
+        } else if (cmd.hasOption("diff")) {
+          FavoredNodesPlan newPlan = rp.getNewAssignmentPlan();
+          Map<String, Map<String, Float>> locality =
+            FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
+          Map<TableName, Integer> movesPerTable = rp.getRegionsMovement(newPlan);
+          rp.checkDifferencesWithOldPlan(movesPerTable, locality, newPlan);
+          System.out.println("Do you want to update the assignment plan? [y/n]");
+          Scanner s = new Scanner(System.in);
+          String input = s.nextLine().trim();
+          if (input.equals("y")) {
+            System.out.println("Updating assignment plan...");
+            rp.updateAssignmentPlan(newPlan);
+          }
+          s.close();
+        } else if (cmd.hasOption("ld")) {
+          Map<String, Map<String, Float>> locality =
+            FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
+          rp.printLocalityAndDispersionForCurrentPlan(locality);
+        } else if (cmd.hasOption("p") || cmd.hasOption("print")) {
+          FavoredNodesPlan plan = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan();
+          printAssignmentPlan(plan);
+        } else if (cmd.hasOption("overwrite")) {
+          if (!cmd.hasOption("f") || !cmd.hasOption("r")) {
+            throw new IllegalArgumentException("Please specify: " +
               " -update -r regionName -f server1:port,server2:port,server3:port");
-        }
+          }
 
-        String regionName = cmd.getOptionValue("r");
-        String favoredNodesStr = cmd.getOptionValue("f");
-        LOG.info("Going to update the region " + regionName + " with the new favored nodes " +
+          String regionName = cmd.getOptionValue("r");
+          String favoredNodesStr = cmd.getOptionValue("f");
+          LOG.info("Going to update the region " + regionName + " with the new favored nodes " +
             favoredNodesStr);
-        List<ServerName> favoredNodes = null;
-        RegionInfo regionInfo =
+          List<ServerName> favoredNodes = null;
+          RegionInfo regionInfo =
             rp.getRegionAssignmentSnapshot().getRegionNameToRegionInfoMap().get(regionName);
-        if (regionInfo == null) {
-          LOG.error("Cannot find the region " + regionName + " from the META");
-        } else {
-          try {
-            favoredNodes = getFavoredNodeList(favoredNodesStr);
-          } catch (IllegalArgumentException e) {
-            LOG.error("Cannot parse the invalid favored nodes because " + e);
+          if (regionInfo == null) {
+            LOG.error("Cannot find the region " + regionName + " from the META");
+          } else {
+            try {
+              favoredNodes = getFavoredNodeList(favoredNodesStr);
+            } catch (IllegalArgumentException e) {
+              LOG.error("Cannot parse the invalid favored nodes because " + e);
+            }
+            FavoredNodesPlan newPlan = new FavoredNodesPlan();
+            newPlan.updateFavoredNodesMap(regionInfo, favoredNodes);
+            rp.updateAssignmentPlan(newPlan);
           }
-          FavoredNodesPlan newPlan = new FavoredNodesPlan();
-          newPlan.updateFavoredNodesMap(regionInfo, favoredNodes);
-          rp.updateAssignmentPlan(newPlan);
+        } else {
+          printHelp(opt);
         }
-      } else {
-        printHelp(opt);
       }
     } catch (ParseException e) {
       printHelp(opt);
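
Two things change above: the connection is created lazily in getConnection() and released in close(), which lets main() drive the maintainer with try-with-resources. A generic sketch of that lazy-init plus Closeable lifecycle, with stand-in names (LazyResource is not an HBase type):

import java.io.Closeable;
import java.io.IOException;

public class LazyOwner implements Closeable {

  /** Created on first use; stays null if never needed. */
  private LazyResource resource;

  private LazyResource get() throws IOException {
    if (resource == null) {
      resource = new LazyResource();
    }
    return resource;
  }

  public void doWork() throws IOException {
    get().use();
  }

  @Override
  public void close() throws IOException {
    // Must tolerate an instance whose resource was never created.
    if (resource != null) {
      resource.close();
    }
  }

  /** Stand-in for the real connection type. */
  static class LazyResource implements Closeable {
    void use() {
    }

    @Override
    public void close() {
    }
  }

  public static void main(String[] args) throws IOException {
    // Mirrors the try-with-resources block added to main() above.
    try (LazyOwner owner = new LazyOwner()) {
      owner.doWork();
    }
  }
}
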


[hbase] 04/09: HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 8cb5a6356938b16afa1e7e7924b8505c1b7eece1
Author: zhangduo <zh...@apache.org>
AuthorDate: Tue Jan 1 21:27:14 2019 +0800

    HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint
---
 .../hbase/client/AsyncRegionServerAdmin.java       | 14 +++++---
 .../hbase/protobuf/ReplicationProtbufUtil.java     | 35 ++++++++++---------
 .../HBaseInterClusterReplicationEndpoint.java      | 31 +++++++++--------
 .../regionserver/ReplicationSinkManager.java       | 40 ++++++++--------------
 .../hbase/replication/SyncReplicationTestBase.java | 12 +++----
 .../regionserver/TestReplicationSinkManager.java   | 21 +++++-------
 6 files changed, 74 insertions(+), 79 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
index 9accd89..b9141a9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -94,9 +95,9 @@ public class AsyncRegionServerAdmin {
     void call(AdminService.Interface stub, HBaseRpcController controller, RpcCallback<RESP> done);
   }
 
-  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
+  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall, CellScanner cellScanner) {
     CompletableFuture<RESP> future = new CompletableFuture<>();
-    HBaseRpcController controller = conn.rpcControllerFactory.newController();
+    HBaseRpcController controller = conn.rpcControllerFactory.newController(cellScanner);
     try {
       rpcCall.call(conn.getAdminStub(server), controller, new RpcCallback<RESP>() {
 
@@ -115,6 +116,10 @@ public class AsyncRegionServerAdmin {
     return future;
   }
 
+  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
+    return call(rpcCall, null);
+  }
+
   public CompletableFuture<GetRegionInfoResponse> getRegionInfo(GetRegionInfoRequest request) {
     return call((stub, controller, done) -> stub.getRegionInfo(controller, request, done));
   }
@@ -154,8 +159,9 @@ public class AsyncRegionServerAdmin {
   }
 
   public CompletableFuture<ReplicateWALEntryResponse> replicateWALEntry(
-      ReplicateWALEntryRequest request) {
-    return call((stub, controller, done) -> stub.replicateWALEntry(controller, request, done));
+      ReplicateWALEntryRequest request, CellScanner cellScanner) {
+    return call((stub, controller, done) -> stub.replicateWALEntry(controller, request, done),
+      cellScanner);
   }
 
   public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request) {
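
The new two-argument call(...) threads an optional CellScanner into the controller while the one-argument overload delegates with null. Underneath both sits the usual callback-to-future bridge; a simplified sketch with stand-in types (none of these names are HBase API, and controller plumbing is elided):

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

public final class CallbackBridge {

  /** Callback-style RPC, a simplified stand-in for RpcCall<RESP> above. */
  @FunctionalInterface
  interface RpcCall<RESP> {
    void call(Consumer<RESP> done);
  }

  /** Adapt a callback-style stub invocation into a CompletableFuture. */
  static <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    try {
      // On success the stub invokes the callback, completing the future.
      rpcCall.call(future::complete);
    } catch (RuntimeException e) {
      // A synchronous failure is reported through the same future.
      future.completeExceptionally(e);
    }
    return future;
  }

  public static void main(String[] args) throws Exception {
    CompletableFuture<String> f = call(done -> done.accept("pong"));
    System.out.println(f.get());
  }
}
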
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index c1b3911..74fad26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -20,51 +20,54 @@ package org.apache.hadoop.hbase.protobuf;
 
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.HBaseRpcControllerImpl;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 
 @InterfaceAudience.Private
 public class ReplicationProtbufUtil {
+
   /**
-   * A helper to replicate a list of WAL entries using admin protocol.
-   * @param admin Admin service
+   * A helper to replicate a list of WAL entries using the region server admin.
+   * @param admin the region server admin
    * @param entries Array of WAL entries to be replicated
    * @param replicationClusterId Id which will uniquely identify source cluster FS client
    *          configurations in the replication configuration directory
    * @param sourceBaseNamespaceDir Path to source cluster base namespace directory
    * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
-   * @throws java.io.IOException
    */
-  public static void replicateWALEntry(final AdminService.BlockingInterface admin,
-      final Entry[] entries, String replicationClusterId, Path sourceBaseNamespaceDir,
-      Path sourceHFileArchiveDir) throws IOException {
-    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
-        buildReplicateWALEntryRequest(entries, null, replicationClusterId, sourceBaseNamespaceDir,
-          sourceHFileArchiveDir);
-    HBaseRpcController controller = new HBaseRpcControllerImpl(p.getSecond());
+  public static void replicateWALEntry(AsyncRegionServerAdmin admin, Entry[] entries,
+      String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir)
+      throws IOException {
+    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(
+      entries, null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
     try {
-      admin.replicateWALEntry(controller, p.getFirst());
-    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {
-      throw ProtobufUtil.getServiceException(e);
+      admin.replicateWALEntry(p.getFirst(), p.getSecond()).get();
+    } catch (InterruptedException e) {
+      throw (IOException) new InterruptedIOException().initCause(e);
+    } catch (ExecutionException e) {
+      Throwable cause = e.getCause();
+      Throwables.propagateIfPossible(cause, IOException.class);
+      throw new IOException(cause);
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 7db53aa..0359096 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -39,7 +39,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -48,13 +47,16 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
@@ -65,8 +67,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
 /**
  * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint}
  * implementation for replicating to another HBase cluster.
@@ -85,8 +85,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
 
   private static final long DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER = 2;
 
-  private ClusterConnection conn;
-  private Configuration localConf;
+  private AsyncClusterConnection conn;
   private Configuration conf;
   // How long should we sleep for each retry
   private long sleepForRetries;
@@ -117,7 +116,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
   public void init(Context context) throws IOException {
     super.init(context);
     this.conf = HBaseConfiguration.create(ctx.getConfiguration());
-    this.localConf = HBaseConfiguration.create(ctx.getLocalConfiguration());
     decorateConf();
     this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
     this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
@@ -132,12 +130,13 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
     // TODO: This connection is replication specific or we should make it particular to
     // replication and make replication specific settings such as compression or codec to use
     // passing Cells.
-    this.conn = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
+    this.conn =
+      ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent());
     this.sleepForRetries =
         this.conf.getLong("replication.source.sleepforretries", 1000);
     this.metrics = context.getMetrics();
     // ReplicationQueueInfo parses the peerId out of the znode for us
-    this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
+    this.replicationSinkMgr = new ReplicationSinkManager(conn, this, this.conf);
     // per sink thread pool
     this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
       HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
@@ -284,9 +283,10 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
   }
 
   private void reconnectToPeerCluster() {
-    ClusterConnection connection = null;
+    AsyncClusterConnection connection = null;
     try {
-      connection = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
+      connection =
+        ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent());
     } catch (IOException ioe) {
       LOG.warn("Failed to create connection for peer cluster", ioe);
     }
@@ -367,7 +367,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
         }
         continue;
       }
-      if (this.conn == null || this.conn.isClosed()) {
+      if (this.conn == null) {
         reconnectToPeerCluster();
       }
       try {
@@ -480,10 +480,11 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
           entriesHashCode, entries.size(), size, replicationClusterId);
       }
       sinkPeer = replicationSinkMgr.getReplicationSink();
-      BlockingInterface rrs = sinkPeer.getRegionServer();
+      AsyncRegionServerAdmin rsAdmin = sinkPeer.getRegionServer();
       try {
-        ReplicationProtbufUtil.replicateWALEntry(rrs, entries.toArray(new Entry[entries.size()]),
-          replicationClusterId, baseNamespaceDir, hfileArchiveDir);
+        ReplicationProtbufUtil.replicateWALEntry(rsAdmin,
+          entries.toArray(new Entry[entries.size()]), replicationClusterId, baseNamespaceDir,
+          hfileArchiveDir);
         LOG.trace("Completed replicating batch {}", entriesHashCode);
       } catch (IOException e) {
         LOG.trace("Failed replicating batch {}", entriesHashCode, e);
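(Editorial sketch: the endpoint above now builds its peer connection through ClusterConnectionFactory instead of casting a ConnectionFactory result, and the reconnect check drops isClosed() because AsyncClusterConnection does not expose one. A hedged sketch of that setup, with illustrative class and method names:)

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.client.AsyncClusterConnection;
  import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
  import org.apache.hadoop.hbase.security.User;

  final class PeerConnectionHolder {
    private volatile AsyncClusterConnection conn;

    void connect(Configuration peerConf) throws IOException {
      // null: no specific local address to bind the RPC client to.
      conn = ClusterConnectionFactory.createAsyncClusterConnection(peerConf, null,
        User.getCurrent());
    }

    boolean needsReconnect() {
      // AsyncClusterConnection has no isClosed(), so the endpoint only
      // checks for a missing connection before reconnecting.
      return conn == null;
    }
  }
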
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
index 3cd7884..21b07ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
@@ -21,11 +21,11 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -35,8 +35,6 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-
 /**
  * Maintains a collection of peers to replicate to, and randomly selects a
  * single peer to replicate to per set of data to replicate. Also handles
@@ -61,9 +59,7 @@ public class ReplicationSinkManager {
   static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f;
 
 
-  private final Connection conn;
-
-  private final String peerClusterId;
+  private final AsyncClusterConnection conn;
 
   private final HBaseReplicationEndpoint endpoint;
 
@@ -77,8 +73,6 @@ public class ReplicationSinkManager {
   // replication sinks is refreshed
   private final int badSinkThreshold;
 
-  private final Random random;
-
   // A timestamp of the last time the list of replication peers changed
   private long lastUpdateToPeers;
 
@@ -88,26 +82,22 @@ public class ReplicationSinkManager {
   /**
    * Instantiate for a single replication peer cluster.
    * @param conn connection to the peer cluster
-   * @param peerClusterId identifier of the peer cluster
    * @param endpoint replication endpoint for inter cluster replication
    * @param conf HBase configuration, used for determining replication source ratio and bad peer
    *          threshold
    */
-  public ReplicationSinkManager(ClusterConnection conn, String peerClusterId,
-      HBaseReplicationEndpoint endpoint, Configuration conf) {
+  public ReplicationSinkManager(AsyncClusterConnection conn, HBaseReplicationEndpoint endpoint,
+      Configuration conf) {
     this.conn = conn;
-    this.peerClusterId = peerClusterId;
     this.endpoint = endpoint;
     this.badReportCounts = Maps.newHashMap();
     this.ratio = conf.getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO);
-    this.badSinkThreshold = conf.getInt("replication.bad.sink.threshold",
-                                        DEFAULT_BAD_SINK_THRESHOLD);
-    this.random = new Random();
+    this.badSinkThreshold =
+      conf.getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD);
   }
 
   /**
    * Get a randomly-chosen replication sink to replicate to.
-   *
    * @return a replication sink to replicate to
    */
   public synchronized SinkPeer getReplicationSink() throws IOException {
@@ -119,8 +109,8 @@ public class ReplicationSinkManager {
     if (sinks.isEmpty()) {
       throw new IOException("No replication sinks are available");
     }
-    ServerName serverName = sinks.get(random.nextInt(sinks.size()));
-    return new SinkPeer(serverName, ((ClusterConnection) conn).getAdmin(serverName));
+    ServerName serverName = sinks.get(ThreadLocalRandom.current().nextInt(sinks.size()));
+    return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName));
   }
 
   /**
@@ -160,7 +150,7 @@ public class ReplicationSinkManager {
    */
   public synchronized void chooseSinks() {
     List<ServerName> slaveAddresses = endpoint.getRegionServers();
-    Collections.shuffle(slaveAddresses, random);
+    Collections.shuffle(slaveAddresses, ThreadLocalRandom.current());
     int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio);
     sinks = slaveAddresses.subList(0, numSinks);
     lastUpdateToPeers = System.currentTimeMillis();
@@ -182,9 +172,9 @@ public class ReplicationSinkManager {
    */
   public static class SinkPeer {
     private ServerName serverName;
-    private AdminService.BlockingInterface regionServer;
+    private AsyncRegionServerAdmin regionServer;
 
-    public SinkPeer(ServerName serverName, AdminService.BlockingInterface regionServer) {
+    public SinkPeer(ServerName serverName, AsyncRegionServerAdmin regionServer) {
       this.serverName = serverName;
       this.regionServer = regionServer;
     }
@@ -193,10 +183,8 @@ public class ReplicationSinkManager {
       return serverName;
     }
 
-    public AdminService.BlockingInterface getRegionServer() {
+    public AsyncRegionServerAdmin getRegionServer() {
       return regionServer;
     }
-
   }
-
 }
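(Editorial sketch: ReplicationSinkManager also swaps its stored Random field for ThreadLocalRandom, dropping a shared generator and any contention on it. A small sketch of the two selection paths under those assumptions; SinkChooser is illustrative:)

  import java.util.Collections;
  import java.util.List;
  import java.util.concurrent.ThreadLocalRandom;
  import org.apache.hadoop.hbase.ServerName;

  final class SinkChooser {
    /** Pick one sink uniformly at random, no shared Random field needed. */
    static ServerName pick(List<ServerName> sinks) {
      return sinks.get(ThreadLocalRandom.current().nextInt(sinks.size()));
    }

    /** Shuffle candidates and keep the leading ratio as the sink set. */
    static List<ServerName> choose(List<ServerName> candidates, float ratio) {
      // ThreadLocalRandom extends Random, so it works with shuffle.
      Collections.shuffle(candidates, ThreadLocalRandom.current());
      int numSinks = (int) Math.ceil(candidates.size() * ratio);
      return candidates.subList(0, numSinks);
    }
  }
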
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
index f373590..e0d112d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -250,19 +250,19 @@ public class SyncReplicationTestBase {
   protected final void verifyReplicationRequestRejection(HBaseTestingUtility utility,
       boolean expectedRejection) throws Exception {
     HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
-    ClusterConnection connection = regionServer.getClusterConnection();
+    AsyncClusterConnection connection = regionServer.getAsyncClusterConnection();
     Entry[] entries = new Entry[10];
     for (int i = 0; i < entries.length; i++) {
       entries[i] =
         new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit());
     }
     if (!expectedRejection) {
-      ReplicationProtbufUtil.replicateWALEntry(connection.getAdmin(regionServer.getServerName()),
-        entries, null, null, null);
+      ReplicationProtbufUtil.replicateWALEntry(
+        connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null);
     } else {
       try {
-        ReplicationProtbufUtil.replicateWALEntry(connection.getAdmin(regionServer.getServerName()),
-          entries, null, null, null);
+        ReplicationProtbufUtil.replicateWALEntry(
+          connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null);
         fail("Should throw IOException when sync-replication state is in A or DA");
       } catch (DoNotRetryIOException e) {
         assertTrue(e.getMessage().contains("Reject to apply to sink cluster"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
index 39dabb4..60afd40 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
@@ -25,7 +25,8 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
@@ -37,8 +38,6 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-
 @Category({ReplicationTests.class, SmallTests.class})
 public class TestReplicationSinkManager {
 
@@ -46,16 +45,14 @@ public class TestReplicationSinkManager {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestReplicationSinkManager.class);
 
-  private static final String PEER_CLUSTER_ID = "PEER_CLUSTER_ID";
-
   private HBaseReplicationEndpoint replicationEndpoint;
   private ReplicationSinkManager sinkManager;
 
   @Before
   public void setUp() {
     replicationEndpoint = mock(HBaseReplicationEndpoint.class);
-    sinkManager = new ReplicationSinkManager(mock(ClusterConnection.class),
-                      PEER_CLUSTER_ID, replicationEndpoint, new Configuration());
+    sinkManager = new ReplicationSinkManager(mock(AsyncClusterConnection.class),
+      replicationEndpoint, new Configuration());
   }
 
   @Test
@@ -100,7 +97,7 @@ public class TestReplicationSinkManager {
     // Sanity check
     assertEquals(1, sinkManager.getNumSinks());
 
-    SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AdminService.BlockingInterface.class));
+    SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class));
 
     sinkManager.reportBadSink(sinkPeer);
 
@@ -131,7 +128,7 @@ public class TestReplicationSinkManager {
 
     ServerName serverName = sinkManager.getSinksForTesting().get(0);
 
-    SinkPeer sinkPeer = new SinkPeer(serverName, mock(AdminService.BlockingInterface.class));
+    SinkPeer sinkPeer = new SinkPeer(serverName, mock(AsyncRegionServerAdmin.class));
 
     sinkManager.reportSinkSuccess(sinkPeer); // has no effect, counter does not go negative
     for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) {
@@ -147,7 +144,7 @@ public class TestReplicationSinkManager {
     //
     serverName = sinkManager.getSinksForTesting().get(0);
 
-    sinkPeer = new SinkPeer(serverName, mock(AdminService.BlockingInterface.class));
+    sinkPeer = new SinkPeer(serverName, mock(AsyncRegionServerAdmin.class));
     for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD-1; i++) {
       sinkManager.reportBadSink(sinkPeer);
     }
@@ -188,8 +185,8 @@ public class TestReplicationSinkManager {
     ServerName serverNameA = sinkList.get(0);
     ServerName serverNameB = sinkList.get(1);
 
-    SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AdminService.BlockingInterface.class));
-    SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AdminService.BlockingInterface.class));
+    SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class));
+    SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AsyncRegionServerAdmin.class));
 
     for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) {
       sinkManager.reportBadSink(sinkPeerA);


[hbase] 01/09: HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 3e75b60870068dc36a2c93d12c1fb0421dc98950
Author: zhangduo <zh...@apache.org>
AuthorDate: Fri Nov 30 08:23:47 2018 +0800

    HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer
---
 .../hbase/client/AsyncClusterConnection.java       | 38 +++++++++++++
 .../hadoop/hbase/client/AsyncConnectionImpl.java   | 14 +++--
 .../hbase/client/ClusterConnectionFactory.java     | 63 ++++++++++++++++++++++
 .../hadoop/hbase/client/ConnectionFactory.java     |  2 +-
 .../apache/hadoop/hbase/util/ReflectionUtils.java  | 22 ++++----
 .../main/java/org/apache/hadoop/hbase/Server.java  | 20 +++++++
 .../org/apache/hadoop/hbase/master/HMaster.java    |  3 ++
 .../hadoop/hbase/regionserver/HRegionServer.java   | 56 +++++++++++++------
 .../regionserver/ReplicationSyncUp.java            |  6 +++
 .../hadoop/hbase/MockRegionServerServices.java     |  5 ++
 .../client/TestAsyncNonMetaRegionLocator.java      |  2 +-
 ...stAsyncNonMetaRegionLocatorConcurrenyLimit.java |  2 +-
 .../hbase/client/TestAsyncRegionLocator.java       |  2 +-
 .../TestAsyncSingleRequestRpcRetryingCaller.java   |  4 +-
 .../hbase/client/TestAsyncTableNoncedRetry.java    |  2 +-
 .../hbase/master/MockNoopMasterServices.java       |  6 +++
 .../hadoop/hbase/master/MockRegionServer.java      |  5 ++
 .../hbase/master/TestActiveMasterManager.java      |  6 +++
 .../hbase/master/cleaner/TestHFileCleaner.java     |  6 +++
 .../hbase/master/cleaner/TestHFileLinkCleaner.java |  6 +++
 .../hbase/master/cleaner/TestLogsCleaner.java      |  6 +++
 .../cleaner/TestReplicationHFileCleaner.java       |  6 +++
 .../hbase/regionserver/TestHeapMemoryManager.java  |  6 +++
 .../hbase/regionserver/TestSplitLogWorker.java     |  6 +++
 .../hadoop/hbase/regionserver/TestWALLockup.java   |  6 +++
 .../replication/TestReplicationTrackerZKImpl.java  |  6 +++
 .../regionserver/TestReplicationSourceManager.java |  6 +++
 .../security/token/TestTokenAuthentication.java    |  6 +++
 .../org/apache/hadoop/hbase/util/MockServer.java   |  6 +++
 29 files changed, 289 insertions(+), 35 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
new file mode 100644
index 0000000..c7dea25
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The asynchronous connection for internal usage.
+ */
+@InterfaceAudience.Private
+public interface AsyncClusterConnection extends AsyncConnection {
+
+  /**
+   * Get the nonce generator for this connection.
+   */
+  NonceGenerator getNonceGenerator();
+
+  /**
+   * Get the rpc client we used to communicate with other servers.
+   */
+  RpcClient getRpcClient();
+}
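(Editorial sketch: the new interface exposes the nonce generator and the RPC client so server-side code can share them instead of constructing its own, as HRegionServer does later in this patch. A hedged usage sketch; the class name is illustrative:)

  import org.apache.hadoop.hbase.client.AsyncClusterConnection;
  import org.apache.hadoop.hbase.ipc.RpcClient;

  class RpcClientReuseExample {
    private final RpcClient rpcClient;

    RpcClientReuseExample(AsyncClusterConnection conn) {
      // Share one RpcClient (and its connection pool) across services
      // rather than creating a second client for master communication.
      this.rpcClient = conn.getRpcClient();
    }
  }
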
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 4a32546..81dafaf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLE
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import java.io.IOException;
+import java.net.SocketAddress;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -63,7 +64,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterServ
  * The implementation of AsyncConnection.
  */
 @InterfaceAudience.Private
-class AsyncConnectionImpl implements AsyncConnection {
+class AsyncConnectionImpl implements AsyncClusterConnection {
 
   private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionImpl.class);
 
@@ -106,7 +107,7 @@ class AsyncConnectionImpl implements AsyncConnection {
   private ChoreService authService;
 
   public AsyncConnectionImpl(Configuration conf, AsyncRegistry registry, String clusterId,
-      User user) {
+      SocketAddress localAddress, User user) {
     this.conf = conf;
     this.user = user;
     if (user.isLoginFromKeytab()) {
@@ -114,7 +115,7 @@ class AsyncConnectionImpl implements AsyncConnection {
     }
     this.connConf = new AsyncConnectionConfiguration(conf);
     this.registry = registry;
-    this.rpcClient = RpcClientFactory.createClient(conf, clusterId);
+    this.rpcClient = RpcClientFactory.createClient(conf, clusterId, localAddress, null);
     this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);
     this.hostnameCanChange = conf.getBoolean(RESOLVE_HOSTNAME_ON_FAIL_KEY, true);
     this.rpcTimeout =
@@ -158,11 +159,16 @@ class AsyncConnectionImpl implements AsyncConnection {
   }
 
   // ditto
-  @VisibleForTesting
+  @Override
   public NonceGenerator getNonceGenerator() {
     return nonceGenerator;
   }
 
+  @Override
+  public RpcClient getRpcClient() {
+    return rpcClient;
+  }
+
   private ClientService.Interface createRegionServerStub(ServerName serverName) throws IOException {
     return ClientService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout));
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
new file mode 100644
index 0000000..68c0630
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.SocketAddress;
+import java.util.concurrent.ExecutionException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+
+/**
+ * The factory for creating {@link AsyncClusterConnection}.
+ */
+@InterfaceAudience.Private
+public final class ClusterConnectionFactory {
+
+  private ClusterConnectionFactory() {
+  }
+
+  /**
+   * Create a new {@link AsyncClusterConnection} instance.
+   * <p/>
+   * Unlike what we have done in {@link ConnectionFactory}, here we just return an
+   * {@link AsyncClusterConnection} instead of a {@link java.util.concurrent.CompletableFuture},
+   * which means this method could block on fetching the cluster id. This is just used to simplify
+   * the implementation, as when starting new region servers, we do not need to be event-driven. Can
+   * change later if we want a {@link java.util.concurrent.CompletableFuture} here.
+   */
+  public static AsyncClusterConnection createAsyncClusterConnection(Configuration conf,
+      SocketAddress localAddress, User user) throws IOException {
+    AsyncRegistry registry = AsyncRegistryFactory.getRegistry(conf);
+    String clusterId;
+    try {
+      clusterId = registry.getClusterId().get();
+    } catch (InterruptedException e) {
+      throw (IOException) new InterruptedIOException().initCause(e);
+    } catch (ExecutionException e) {
+      Throwable cause = e.getCause();
+      Throwables.propagateIfPossible(cause, IOException.class);
+      throw new IOException(cause);
+    }
+    return new AsyncConnectionImpl(conf, registry, clusterId, localAddress, user);
+  }
+}
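(Editorial sketch: per the javadoc above, creation may block while the registry fetches the cluster id, and the caller owns the connection's lifecycle, as the HMaster and HRegionServer shutdown paths in this patch show. A minimal lifecycle sketch under those assumptions:)

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.AsyncClusterConnection;
  import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
  import org.apache.hadoop.hbase.security.User;

  public class ConnectionLifecycle {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      // May block here while fetching the cluster id from the registry.
      AsyncClusterConnection conn =
        ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent());
      try {
        // ... use conn, e.g. conn.getNonceGenerator() ...
      } finally {
        conn.close(); // the owner closes it on shutdown
      }
    }
  }
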
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index e3e87f6..d4e50ed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -297,7 +297,7 @@ public class ConnectionFactory {
       try {
         future.complete(
           user.runAs((PrivilegedExceptionAction<? extends AsyncConnection>) () -> ReflectionUtils
-            .newInstance(clazz, conf, registry, clusterId, user)));
+            .newInstance(clazz, conf, registry, clusterId, null, user)));
       } catch (Exception e) {
         future.completeExceptionally(e);
       }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
index a136846..268249d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
@@ -83,15 +83,19 @@ public class ReflectionUtils {
 
       boolean match = true;
       for (int i = 0; i < ctorParamTypes.length && match; ++i) {
-        Class<?> paramType = paramTypes[i].getClass();
-        match = (!ctorParamTypes[i].isPrimitive()) ? ctorParamTypes[i].isAssignableFrom(paramType) :
-                  ((int.class.equals(ctorParamTypes[i]) && Integer.class.equals(paramType)) ||
-                   (long.class.equals(ctorParamTypes[i]) && Long.class.equals(paramType)) ||
-                   (double.class.equals(ctorParamTypes[i]) && Double.class.equals(paramType)) ||
-                   (char.class.equals(ctorParamTypes[i]) && Character.class.equals(paramType)) ||
-                   (short.class.equals(ctorParamTypes[i]) && Short.class.equals(paramType)) ||
-                   (boolean.class.equals(ctorParamTypes[i]) && Boolean.class.equals(paramType)) ||
-                   (byte.class.equals(ctorParamTypes[i]) && Byte.class.equals(paramType)));
+        if (paramTypes[i] == null) {
+          match = !ctorParamTypes[i].isPrimitive();
+        } else {
+          Class<?> paramType = paramTypes[i].getClass();
+          match = (!ctorParamTypes[i].isPrimitive()) ? ctorParamTypes[i].isAssignableFrom(paramType)
+            : ((int.class.equals(ctorParamTypes[i]) && Integer.class.equals(paramType)) ||
+              (long.class.equals(ctorParamTypes[i]) && Long.class.equals(paramType)) ||
+              (double.class.equals(ctorParamTypes[i]) && Double.class.equals(paramType)) ||
+              (char.class.equals(ctorParamTypes[i]) && Character.class.equals(paramType)) ||
+              (short.class.equals(ctorParamTypes[i]) && Short.class.equals(paramType)) ||
+              (boolean.class.equals(ctorParamTypes[i]) && Boolean.class.equals(paramType)) ||
+              (byte.class.equals(ctorParamTypes[i]) && Byte.class.equals(paramType)));
+        }
       }
 
       if (match) {
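(Editorial sketch: the extra null branch matters because ConnectionFactory now passes a null SocketAddress through to the constructor lookup; previously paramTypes[i].getClass() would throw NullPointerException on a null argument. An illustrative sketch; the Target class is hypothetical:)

  import org.apache.hadoop.hbase.util.ReflectionUtils;

  public class NullParamDemo {
    // Hypothetical target: the second parameter is a reference type.
    public static class Target {
      public Target(String clusterId, Object localAddress) {
      }
    }

    public static void main(String[] args) {
      // With this patch, the null argument simply matches any
      // non-primitive constructor parameter type.
      Target t = ReflectionUtils.newInstance(Target.class, "test-cluster", null);
      System.out.println(t != null);
    }
  }
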
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index fb898ea..c33d5af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -61,6 +63,24 @@ public interface Server extends Abortable, Stoppable {
   ClusterConnection getClusterConnection();
 
   /**
+   * Returns a reference to the servers' async connection.
+   * <p/>
+   * Important note: this method returns a reference to Connection which is managed by Server
+   * itself, so callers must NOT attempt to close connection obtained.
+   */
+  default AsyncConnection getAsyncConnection() {
+    return getAsyncClusterConnection();
+  }
+
+  /**
+   * Returns a reference to the servers' async cluster connection.
+   * <p/>
+   * Important note: this method returns a reference to Connection which is managed by Server
+   * itself, so callers must NOT attempt to close connection obtained.
+   */
+  AsyncClusterConnection getAsyncClusterConnection();
+
+  /**
    * @return The unique server name for this server.
    */
   ServerName getServerName();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 9d2a743..7579fd5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3033,6 +3033,9 @@ public class HMaster extends HRegionServer implements MasterServices {
     if (this.clusterConnection != null) {
       this.clusterConnection.close();
     }
+    if (this.asyncClusterConnection != null) {
+      this.asyncClusterConnection.close();
+    }
   }
 
   public void stopMaster() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 34a6c13..dbc5e77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -82,7 +82,9 @@ import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.ZNodeClearer;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -109,7 +111,6 @@ import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper;
 import org.apache.hadoop.hbase.ipc.RpcClient;
-import org.apache.hadoop.hbase.ipc.RpcClientFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
@@ -267,6 +268,11 @@ public class HRegionServer extends HasThread implements
   protected ClusterConnection clusterConnection;
 
   /**
+   * The asynchronous cluster connection to be shared by services.
+   */
+  protected AsyncClusterConnection asyncClusterConnection;
+
+  /**
    * Go here to get table descriptors.
    */
   protected TableDescriptors tableDescriptors;
@@ -783,11 +789,7 @@ public class HRegionServer extends HasThread implements
     return true;
   }
 
-  /**
-   * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
-   * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
-   */
-  private ClusterConnection createClusterConnection() throws IOException {
+  private Configuration unsetClientZookeeperQuorum() {
     Configuration conf = this.conf;
     if (conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) != null) {
       // Use server ZK cluster for server-issued connections, so we clone
@@ -795,11 +797,20 @@ public class HRegionServer extends HasThread implements
       conf = new Configuration(this.conf);
       conf.unset(HConstants.CLIENT_ZOOKEEPER_QUORUM);
     }
+    return conf;
+  }
+
+  /**
+   * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
+   * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
+   */
+  private ClusterConnection createClusterConnection() throws IOException {
     // Create a cluster connection that when appropriate, can short-circuit and go directly to the
     // local server if the request is to the local server bypassing RPC. Can be used for both local
     // and remote invocations.
-    ClusterConnection conn = ConnectionUtils.createShortCircuitConnection(conf, null,
-      userProvider.getCurrent(), serverName, rpcServices, rpcServices);
+    ClusterConnection conn =
+      ConnectionUtils.createShortCircuitConnection(unsetClientZookeeperQuorum(), null,
+        userProvider.getCurrent(), serverName, rpcServices, rpcServices);
     // This is used to initialize the batch thread pool inside the connection implementation.
     // When deploy a fresh cluster, we may first use the cluster connection in InitMetaProcedure,
     // which will be executed inside the PEWorker, and then the batch thread pool will inherit the
@@ -833,9 +844,12 @@ public class HRegionServer extends HasThread implements
   /**
    * Setup our cluster connection if not already initialized.
    */
-  protected synchronized void setupClusterConnection() throws IOException {
+  protected final synchronized void setupClusterConnection() throws IOException {
     if (clusterConnection == null) {
       clusterConnection = createClusterConnection();
+      asyncClusterConnection =
+        ClusterConnectionFactory.createAsyncClusterConnection(unsetClientZookeeperQuorum(),
+          new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), userProvider.getCurrent());
     }
   }
 
@@ -849,8 +863,7 @@ public class HRegionServer extends HasThread implements
       initializeZooKeeper();
       setupClusterConnection();
       // Setup RPC client for master communication
-      this.rpcClient = RpcClientFactory.createClient(conf, clusterId, new InetSocketAddress(
-          this.rpcServices.isa.getAddress(), 0), clusterConnection.getConnectionMetrics());
+      this.rpcClient = asyncClusterConnection.getRpcClient();
     } catch (Throwable t) {
       // Call stop if error or process will stick around for ever since server
       // puts up non-daemon threads.
@@ -1114,7 +1127,15 @@ public class HRegionServer extends HasThread implements
         LOG.warn("Attempt to close server's short circuit ClusterConnection failed.", e);
       }
     }
-
+    if (this.asyncClusterConnection != null) {
+      try {
+        this.asyncClusterConnection.close();
+      } catch (IOException e) {
+        // Although the {@link Closeable} interface throws an {@link
+        // IOException}, in reality, the implementation would never do that.
+        LOG.warn("Attempt to close server's AsyncClusterConnection failed.", e);
+      }
+    }
     // Closing the compactSplit thread before closing meta regions
     if (!this.killed && containsMetaTableRegions()) {
       if (!abortRequested || this.fsOk) {
@@ -3747,9 +3768,9 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public EntityLock regionLock(List<RegionInfo> regionInfos, String description,
-      Abortable abort) throws IOException {
-    return new LockServiceClient(conf, lockStub, clusterConnection.getNonceGenerator())
+  public EntityLock regionLock(List<RegionInfo> regionInfos, String description, Abortable abort)
+      throws IOException {
+    return new LockServiceClient(conf, lockStub, asyncClusterConnection.getNonceGenerator())
       .regionLock(regionInfos, description, abort);
   }
 
@@ -3854,4 +3875,9 @@ public class HRegionServer extends HasThread implements
       System.exit(1);
     }
   }
+
+  @Override
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return asyncClusterConnection;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index c7bccb3..7d1245c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -180,5 +181,10 @@ public class ReplicationSyncUp extends Configured implements Tool {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 0e4f241..5205960 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -368,4 +369,8 @@ public class MockRegionServerServices implements RegionServerServices {
   public Optional<MobFileCache> getMobFileCache() {
     return Optional.empty();
   }
+
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
index eeaf99f..550a6f3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
@@ -81,7 +81,7 @@ public class TestAsyncNonMetaRegionLocator {
     TEST_UTIL.getAdmin().balancerSwitch(false, true);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-      registry.getClusterId().get(), User.getCurrent());
+      registry.getClusterId().get(), null, User.getCurrent());
     LOCATOR = new AsyncNonMetaRegionLocator(CONN);
     SPLIT_KEYS = new byte[8][];
     for (int i = 111; i < 999; i += 111) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
index 8cdb4a9..7e06218 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
@@ -125,7 +125,7 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
     TEST_UTIL.getAdmin().balancerSwitch(false, true);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-      registry.getClusterId().get(), User.getCurrent());
+      registry.getClusterId().get(), null, User.getCurrent());
     LOCATOR = new AsyncNonMetaRegionLocator(CONN);
     SPLIT_KEYS = IntStream.range(1, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
       .toArray(byte[][]::new);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
index 0b51ce8..c6b1e75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
@@ -99,7 +99,7 @@ public class TestAsyncRegionLocator {
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-      registry.getClusterId().get(), User.getCurrent());
+      registry.getClusterId().get(), null, User.getCurrent());
     LOCATOR = CONN.getLocator();
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
index 7d8956b..29dcd56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
@@ -73,7 +73,7 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-      registry.getClusterId().get(), User.getCurrent());
+      registry.getClusterId().get(), null, User.getCurrent());
   }
 
   @AfterClass
@@ -164,7 +164,7 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
         }
       };
     try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(),
-      CONN.registry, CONN.registry.getClusterId().get(), User.getCurrent()) {
+      CONN.registry, CONN.registry.getClusterId().get(), null, User.getCurrent()) {
 
       @Override
       AsyncRegionLocator getLocator() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java
index 3008561..e1e55f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java
@@ -85,7 +85,7 @@ public class TestAsyncTableNoncedRetry {
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     ASYNC_CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-        registry.getClusterId().get(), User.getCurrent()) {
+      registry.getClusterId().get(), null, User.getCurrent()) {
 
       @Override
       public NonceGenerator getNonceGenerator() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 9c55f57..3ebad66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
@@ -473,4 +474,9 @@ public class MockNoopMasterServices implements MasterServices {
   public SyncReplicationReplayWALManager getSyncReplicationReplayWALManager() {
     return null;
   }
+
+  @Override
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return null;
+  }
 }
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index a930d7f..73d53c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -721,4 +722,8 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
   public Optional<MobFileCache> getMobFileCache() {
     return Optional.empty();
   }
+
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 2300f54..77667a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -349,5 +350,10 @@ public class TestActiveMasterManager {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index 5c8db3e..c5fad32 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -279,6 +280,11 @@ public class TestHFileCleaner {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   @Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index 119194b..fd11ff8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.HFileLink;
@@ -213,5 +214,10 @@ public class TestHFileLinkCleaner {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index 4d254a0..6a5fe9c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -411,6 +412,11 @@ public class TestLogsCleaner {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   static class FaultyZooKeeperWatcher extends ZKWatcher {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index d162bf3..9791643 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -303,6 +304,11 @@ public class TestReplicationHFileCleaner {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   static class FaultyZooKeeperWatcher extends ZKWatcher {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 8c9ce75..4a359e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -862,6 +863,11 @@ public class TestHeapMemoryManager {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   static class CustomHeapMemoryTuner implements HeapMemoryTuner {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index 14dc619..43da846 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
@@ -160,6 +161,11 @@ public class TestSplitLogWorker {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   private void waitForCounter(LongAdder ctr, long oldval, long newval, long timems)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index 0e20252..9e9d1d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Durability;
@@ -523,6 +524,11 @@ public class TestWALLockup {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   static class DummyWALActionsListener implements WALActionsListener {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 863d558..62ab265 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -263,5 +264,10 @@ public class TestReplicationTrackerZKImpl {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 86bbb09..427f319 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -906,5 +907,10 @@ public abstract class TestReplicationSourceManager {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index e4780f1..92c8e54 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -363,6 +364,11 @@ public class TestTokenAuthentication {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   @Parameters(name = "{index}: rpcServerImpl={0}")
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index c25db01..13212d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.log.HBaseMarkers;
@@ -143,4 +144,9 @@ public class MockServer implements Server {
   public Connection createConnection(Configuration conf) throws IOException {
     return null;
   }
+
+  @Override
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return null;
+  }
 }
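
A note on the repeated hunk above: every test double of Server must now also
override the async accessor. A minimal sketch of the recurring pattern
(StubServer is a hypothetical name; the patched classes are the real ones):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Connection;

// Hypothetical condensed stub, for illustration only; each patched test class
// above does exactly this.
abstract class StubServer implements Server {

  @Override
  public Connection createConnection(Configuration conf) throws IOException {
    return null; // tests that need a real connection mock one instead
  }

  @Override
  public AsyncClusterConnection getAsyncClusterConnection() {
    return null; // the new accessor; a null stub is fine when never dereferenced
  }
}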


[hbase] 06/09: HBASE-21671 Rewrite RegionReplicaReplicationEndpoint to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 975cd99d15b7fcb191d645c8f744921ae60d8934
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Fri Jan 11 16:22:24 2019 +0800

    HBASE-21671 Rewrite RegionReplicaReplicationEndpoint to use AsyncClusterConnection
---
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |  24 +-
 .../hbase/client/AsyncClusterConnection.java       |  17 +
 .../hbase/client/AsyncClusterConnectionImpl.java   |  80 +++
 .../AsyncRegionReplicaReplayRetryingCaller.java    | 146 ++++
 .../hbase/client/AsyncRegionServerAdmin.java       |   5 +-
 .../hbase/client/ClusterConnectionFactory.java     |   2 +-
 .../hbase/protobuf/ReplicationProtbufUtil.java     |  31 +-
 .../handler/RegionReplicaFlushHandler.java         |   3 +-
 .../hbase/replication/ReplicationEndpoint.java     |  35 +-
 .../RegionReplicaReplicationEndpoint.java          | 782 +++++++--------------
 .../regionserver/ReplicationSource.java            |   2 +-
 .../hbase/client/TestAsyncTableNoncedRetry.java    |   2 +-
 .../TestRegionReplicaReplicationEndpoint.java      |  56 +-
 ...stRegionReplicaReplicationEndpointNoMaster.java |  99 ++-
 14 files changed, 627 insertions(+), 657 deletions(-)
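
Before the per-file diffs, a hedged sketch of the calling convention this
commit introduces: replay() reports the number of skipped edits through a
CompletableFuture<Long>, which a caller may consume synchronously via
FutureUtils.get. All surrounding names below are illustrative:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.hbase.wal.WAL.Entry;

final class ReplayUsageSketch {
  // Blocks on the async replay; the returned Long is the count of skipped
  // edits rather than a status code, so callers feed it into metrics.
  static long replayToReplica(AsyncClusterConnection conn, TableName tableName,
      byte[] encodedRegionName, byte[] row, List<Entry> entries, int replicaId,
      int numRetries, long operationTimeoutNs) throws IOException {
    return FutureUtils.get(conn.replay(tableName, encodedRegionName, row,
      entries, replicaId, numRetries, operationTimeoutNs));
  }
}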

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 0808e90..cc0e075 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -55,7 +55,6 @@ import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
@@ -65,7 +64,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterServ
  * The implementation of AsyncConnection.
  */
 @InterfaceAudience.Private
-class AsyncConnectionImpl implements AsyncClusterConnection {
+class AsyncConnectionImpl implements AsyncConnection {
 
   private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionImpl.class);
 
@@ -85,7 +84,7 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
 
   private final int rpcTimeout;
 
-  private final RpcClient rpcClient;
+  protected final RpcClient rpcClient;
 
   final RpcControllerFactory rpcControllerFactory;
 
@@ -160,16 +159,10 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
   }
 
   // ditto
-  @Override
-  public NonceGenerator getNonceGenerator() {
+  NonceGenerator getNonceGenerator() {
     return nonceGenerator;
   }
 
-  @Override
-  public RpcClient getRpcClient() {
-    return rpcClient;
-  }
-
   private ClientService.Interface createRegionServerStub(ServerName serverName) throws IOException {
     return ClientService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout));
   }
@@ -360,15 +353,4 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
     return new HBaseHbck(MasterProtos.HbckService.newBlockingStub(
       rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout)), rpcControllerFactory);
   }
-
-  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
-    return new AsyncRegionServerAdmin(serverName, this);
-  }
-
-  @Override
-  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
-      boolean writeFlushWALMarker) {
-    RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin();
-    return admin.flushRegionInternal(regionName, writeFlushWALMarker);
-  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
similarity index 72%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index f1f64ca..0ad77ba 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
@@ -49,4 +53,17 @@ public interface AsyncClusterConnection extends AsyncConnection {
    * Flush a region and get the response.
    */
   CompletableFuture<FlushRegionResponse> flush(byte[] regionName, boolean writeFlushWALMarker);
+
+  /**
+   * Replicate WAL edits to the replica regions. The return value is the number of edits we
+   * skipped, as the original response message is not used by the upper layer.
+   */
+  CompletableFuture<Long> replay(TableName tableName, byte[] encodedRegionName, byte[] row,
+      List<Entry> entries, int replicaId, int numRetries, long operationTimeoutNs);
+
+  /**
+   * Return all the replicas for a region. Used for region replica replication.
+   */
+  CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
+      boolean reload);
 }
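
A short usage sketch for the getRegionLocations() method added above (names
illustrative): index 0 of the returned RegionLocations is the primary, so
replica fan-out starts at 1.

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.util.FutureUtils;

final class LocateReplicasSketch {
  // Prints the secondary replica locations for the region containing row.
  static void printSecondaries(AsyncClusterConnection conn, TableName tableName,
      byte[] row) throws IOException {
    RegionLocations locs = FutureUtils.get(conn.getRegionLocations(tableName, row, false));
    for (int replicaId = 1; replicaId < locs.size(); replicaId++) {
      HRegionLocation loc = locs.getRegionLocation(replicaId);
      System.out.println("replica " + replicaId + " -> " + loc); // may be null if unassigned
    }
  }
}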
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
new file mode 100644
index 0000000..d61f01f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.net.SocketAddress;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+
+/**
+ * The implementation of AsyncClusterConnection.
+ */
+@InterfaceAudience.Private
+class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection {
+
+  public AsyncClusterConnectionImpl(Configuration conf, AsyncRegistry registry, String clusterId,
+      SocketAddress localAddress, User user) {
+    super(conf, registry, clusterId, localAddress, user);
+  }
+
+  @Override
+  public NonceGenerator getNonceGenerator() {
+    return super.getNonceGenerator();
+  }
+
+  @Override
+  public RpcClient getRpcClient() {
+    return rpcClient;
+  }
+
+  @Override
+  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
+    return new AsyncRegionServerAdmin(serverName, this);
+  }
+
+  @Override
+  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
+      boolean writeFlushWALMarker) {
+    RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin();
+    return admin.flushRegionInternal(regionName, writeFlushWALMarker);
+  }
+
+  @Override
+  public CompletableFuture<Long> replay(TableName tableName, byte[] encodedRegionName, byte[] row,
+      List<Entry> entries, int replicaId, int retries, long operationTimeoutNs) {
+    return new AsyncRegionReplicaReplayRetryingCaller(RETRY_TIMER, this,
+      ConnectionUtils.retries2Attempts(retries), operationTimeoutNs, tableName, encodedRegionName,
+      row, entries, replicaId).call();
+  }
+
+  @Override
+  public CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
+      boolean reload) {
+    return getLocator().getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L);
+  }
+}
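
The flush() override above simply funnels into RawAsyncHBaseAdmin; a hedged
sketch of synchronous use (the comment on writeFlushWALMarker is our reading
of how RegionReplicaFlushHandler uses it, not patch text):

import java.io.IOException;

import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.util.FutureUtils;

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;

final class FlushSketch {
  // Triggers a flush on the primary region and waits for the response; passing
  // true asks the server to write a flush WAL marker even if there is nothing
  // to flush (assumption; see RegionReplicaFlushHandler later in this patch).
  static FlushRegionResponse flushAndWait(AsyncClusterConnection conn, byte[] regionName)
      throws IOException {
    return FutureUtils.get(conn.flush(regionName, true));
  }
}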
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java
new file mode 100644
index 0000000..3364f15
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
+
+/**
+ * For replaying edits for a region replica.
+ * <p/>
+ * The main difference here is that, every time after locating, we check whether the region
+ * name matches; if not, we give up, as this usually means the region has been split or
+ * merged, and the new region(s) should already have all the data of the parent region(s).
+ * <p/>
+ * Notice that the return value is the number of edits we skipped, as the original response
+ * message is not used by the upper layer.
+ */
+@InterfaceAudience.Private
+public class AsyncRegionReplicaReplayRetryingCaller extends AsyncRpcRetryingCaller<Long> {
+
+  private static final Logger LOG =
+    LoggerFactory.getLogger(AsyncRegionReplicaReplayRetryingCaller.class);
+
+  private final TableName tableName;
+
+  private final byte[] encodedRegionName;
+
+  private final byte[] row;
+
+  private final Entry[] entries;
+
+  private final int replicaId;
+
+  public AsyncRegionReplicaReplayRetryingCaller(HashedWheelTimer retryTimer,
+      AsyncClusterConnectionImpl conn, int maxAttempts, long operationTimeoutNs,
+      TableName tableName, byte[] encodedRegionName, byte[] row, List<Entry> entries,
+      int replicaId) {
+    super(retryTimer, conn, conn.connConf.getPauseNs(), maxAttempts, operationTimeoutNs,
+      conn.connConf.getWriteRpcTimeoutNs(), conn.connConf.getStartLogErrorsCnt());
+    this.tableName = tableName;
+    this.encodedRegionName = encodedRegionName;
+    this.row = row;
+    this.entries = entries.toArray(new Entry[0]);
+    this.replicaId = replicaId;
+  }
+
+  private void call(HRegionLocation loc) {
+    if (!Bytes.equals(encodedRegionName, loc.getRegion().getEncodedNameAsBytes())) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+          "Skipping {} entries in table {} because located region {} is different than" +
+            " the original region {} from WALEdit",
+          entries.length, tableName, loc.getRegion().getEncodedName(),
+          Bytes.toStringBinary(encodedRegionName));
+        for (Entry entry : entries) {
+          LOG.trace("Skipping : " + entry);
+        }
+      }
+      future.complete(Long.valueOf(entries.length));
+      return;
+    }
+
+    AdminService.Interface stub;
+    try {
+      stub = conn.getAdminStub(loc.getServerName());
+    } catch (IOException e) {
+      onError(e,
+        () -> "Get async admin stub to " + loc.getServerName() + " for '" +
+          Bytes.toStringBinary(row) + "' in " + loc.getRegion().getEncodedName() + " of " +
+          tableName + " failed",
+        err -> conn.getLocator().updateCachedLocationOnError(loc, err));
+      return;
+    }
+    Pair<ReplicateWALEntryRequest, CellScanner> p = ReplicationProtbufUtil
+      .buildReplicateWALEntryRequest(entries, encodedRegionName, null, null, null);
+    resetCallTimeout();
+    controller.setCellScanner(p.getSecond());
+    stub.replay(controller, p.getFirst(), r -> {
+      if (controller.failed()) {
+        onError(controller.getFailed(),
+          () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " +
+            loc.getRegion().getEncodedName() + " of " + tableName + " failed",
+          err -> conn.getLocator().updateCachedLocationOnError(loc, err));
+      } else {
+        future.complete(0L);
+      }
+    });
+
+  }
+
+  @Override
+  protected void doCall() {
+    long locateTimeoutNs;
+    if (operationTimeoutNs > 0) {
+      locateTimeoutNs = remainingTimeNs();
+      if (locateTimeoutNs <= 0) {
+        completeExceptionally();
+        return;
+      }
+    } else {
+      locateTimeoutNs = -1L;
+    }
+    addListener(conn.getLocator().getRegionLocation(tableName, row, replicaId,
+      RegionLocateType.CURRENT, locateTimeoutNs), (loc, error) -> {
+        if (error != null) {
+          onError(error,
+            () -> "Locate '" + Bytes.toStringBinary(row) + "' in " + tableName + " failed", err -> {
+            });
+          return;
+        }
+        call(loc);
+      });
+  }
+}
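
doCall() above slices a single operation timeout across successive steps;
stripped of the caller machinery, the budget check looks like this
(hypothetical helper, mirroring what remainingTimeNs() provides):

import java.util.concurrent.TimeoutException;

final class DeadlineSliceSketch {
  // Returns the time budget left for the next step. A non-positive remainder
  // means the overall operation has already timed out, so no further locate
  // or RPC attempt should be made.
  static long remainingNs(long startNs, long operationTimeoutNs) throws TimeoutException {
    long remaining = operationTimeoutNs - (System.nanoTime() - startNs);
    if (remaining <= 0) {
      throw new TimeoutException("operation timed out before the next step");
    }
    return remaining;
  }
}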
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
similarity index 99%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
index b9141a9..d491890 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
@@ -164,8 +164,9 @@ public class AsyncRegionServerAdmin {
       cellScanner);
   }
 
-  public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request) {
-    return call((stub, controller, done) -> stub.replay(controller, request, done));
+  public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request,
+      CellScanner cellScanner) {
+    return call((stub, controller, done) -> stub.replay(controller, request, done), cellScanner);
   }
 
   public CompletableFuture<RollWALWriterResponse> rollWALWriter(RollWALWriterRequest request) {
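
The extra CellScanner parameter above matters because the WAL cells travel
beside the protobuf request rather than inside it; a sketch of a caller
keeping the two halves together (names illustrative):

import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.WAL.Entry;

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;

final class ReplayPairSketch {
  static void replay(AsyncRegionServerAdmin admin, Entry[] entries, byte[] encodedRegionName) {
    // the builder returns the request plus the CellScanner over the WALEdit
    // cells; both halves must be handed to the same replay call
    Pair<ReplicateWALEntryRequest, CellScanner> p = ReplicationProtbufUtil
      .buildReplicateWALEntryRequest(entries, encodedRegionName, null, null, null);
    admin.replay(p.getFirst(), p.getSecond()); // returned future ignored in this sketch
  }
}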
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
similarity index 95%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
index 79484db..2670420 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -46,6 +46,6 @@ public final class ClusterConnectionFactory {
       SocketAddress localAddress, User user) throws IOException {
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(conf);
     String clusterId = FutureUtils.get(registry.getClusterId());
-    return new AsyncConnectionImpl(conf, registry, clusterId, localAddress, user);
+    return new AsyncClusterConnectionImpl(conf, registry, clusterId, localAddress, user);
   }
 }
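
Assuming the factory method is named createAsyncClusterConnection (its name
sits above the visible hunk), server-side callers now receive the richer
interface while the concrete type becomes AsyncClusterConnectionImpl:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
import org.apache.hadoop.hbase.security.User;

final class FactorySketch {
  // null binds no specific local address; the method name is an assumption
  static AsyncClusterConnection open(Configuration conf) throws IOException {
    return ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent());
  }
}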
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 9f41a76..c39c86c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -37,7 +37,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 
 @InterfaceAudience.Private
@@ -55,20 +56,18 @@ public class ReplicationProtbufUtil {
   public static void replicateWALEntry(AsyncRegionServerAdmin admin, Entry[] entries,
       String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir)
       throws IOException {
-    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(
-      entries, null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
+    Pair<ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(entries, null,
+      replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
     FutureUtils.get(admin.replicateWALEntry(p.getFirst(), p.getSecond()));
   }
 
   /**
    * Create a new ReplicateWALEntryRequest from a list of WAL entries
-   *
    * @param entries the WAL entries to be replicated
-   * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values
-   * found.
+   * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found.
    */
-  public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>
-      buildReplicateWALEntryRequest(final Entry[] entries) throws IOException {
+  public static Pair<ReplicateWALEntryRequest, CellScanner> buildReplicateWALEntryRequest(
+      final Entry[] entries) {
     return buildReplicateWALEntryRequest(entries, null, null, null, null);
   }
 
@@ -82,16 +81,14 @@ public class ReplicationProtbufUtil {
    * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
    * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found.
    */
-  public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>
-      buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName,
-          String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir)
-          throws IOException {
+  public static Pair<ReplicateWALEntryRequest, CellScanner> buildReplicateWALEntryRequest(
+      final Entry[] entries, byte[] encodedRegionName, String replicationClusterId,
+      Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) {
     // Accumulate all the Cells seen in here.
     List<List<? extends Cell>> allCells = new ArrayList<>(entries.length);
     int size = 0;
-    AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder();
-    AdminProtos.ReplicateWALEntryRequest.Builder builder =
-      AdminProtos.ReplicateWALEntryRequest.newBuilder();
+    WALEntry.Builder entryBuilder = WALEntry.newBuilder();
+    ReplicateWALEntryRequest.Builder builder = ReplicateWALEntryRequest.newBuilder();
 
     for (Entry entry: entries) {
       entryBuilder.clear();
@@ -99,8 +96,8 @@ public class ReplicationProtbufUtil {
       try {
         keyBuilder = entry.getKey().getBuilder(WALCellCodec.getNoneCompressor());
       } catch (IOException e) {
-        throw new IOException(
-            "There should not throw exception since NoneCompressor do not throw any exceptions", e);
+        throw new AssertionError(
+          "This should never happen since NoneCompressor does not throw any exceptions", e);
       }
       if(encodedRegionName != null){
         keyBuilder.setEncodedRegionName(
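
The IOException-to-AssertionError change in this file follows a general
pattern: when a checked exception is provably unreachable, rethrow it as a
programming error instead of widening every caller's signature. A minimal
standalone illustration:

import java.io.IOException;

final class UnreachableExceptionSketch {
  static String readInMemory() {
    try {
      return riskyButSafe(); // cannot actually fail for in-memory input
    } catch (IOException e) {
      // surfacing the impossible case as an error keeps callers free of a
      // throws clause they could never meaningfully handle
      throw new AssertionError("unreachable: in-memory read cannot fail", e);
    }
  }

  private static String riskyButSafe() throws IOException {
    return "ok";
  }
}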
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index 0729203..cc798cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -185,7 +185,6 @@ public class RegionReplicaFlushHandler extends EventHandler {
             "Was not able to trigger a flush from primary region due to old server version? " +
               "Continuing to open the secondary region replica: " +
               region.getRegionInfo().getRegionNameAsString());
-          region.setReadsEnabled(true);
           break;
         }
       }
@@ -195,6 +194,6 @@ public class RegionReplicaFlushHandler extends EventHandler {
         throw new InterruptedIOException(e.getMessage());
       }
     }
+    region.setReadsEnabled(true);
   }
-
 }
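
The RegionReplicaFlushHandler change above hoists setReadsEnabled(true) out of
the retry loop so that every exit path, not just the old-primary branch,
enables reads. The shape of the fix, illustrated standalone (all names
hypothetical):

final class RetryLoopShapeSketch {

  interface Attempt {
    boolean done() throws InterruptedException; // true once the flush round trip succeeds
  }

  static void runThenEnableReads(Attempt attempt, Runnable enableReads, int maxAttempts)
      throws InterruptedException {
    for (int i = 0; i < maxAttempts && !attempt.done(); i++) {
      Thread.sleep(100L); // placeholder backoff between attempts
    }
    enableReads.run(); // now reached on every exit path, not just one break
  }
}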
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
index f4c37b1..ca73663 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
@@ -29,6 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
@@ -53,6 +54,7 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener {
 
   @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
   class Context {
+    private final Server server;
     private final Configuration localConf;
     private final Configuration conf;
     private final FileSystem fs;
@@ -64,16 +66,11 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener {
     private final Abortable abortable;
 
     @InterfaceAudience.Private
-    public Context(
-        final Configuration localConf,
-        final Configuration conf,
-        final FileSystem fs,
-        final String peerId,
-        final UUID clusterId,
-        final ReplicationPeer replicationPeer,
-        final MetricsSource metrics,
-        final TableDescriptors tableDescriptors,
-        final Abortable abortable) {
+    public Context(final Server server, final Configuration localConf, final Configuration conf,
+        final FileSystem fs, final String peerId, final UUID clusterId,
+        final ReplicationPeer replicationPeer, final MetricsSource metrics,
+        final TableDescriptors tableDescriptors, final Abortable abortable) {
+      this.server = server;
       this.localConf = localConf;
       this.conf = conf;
       this.fs = fs;
@@ -84,34 +81,50 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener {
       this.tableDescriptors = tableDescriptors;
       this.abortable = abortable;
     }
+
+    public Server getServer() {
+      return server;
+    }
+
     public Configuration getConfiguration() {
       return conf;
     }
+
     public Configuration getLocalConfiguration() {
       return localConf;
     }
+
     public FileSystem getFilesystem() {
       return fs;
     }
+
     public UUID getClusterId() {
       return clusterId;
     }
+
     public String getPeerId() {
       return peerId;
     }
+
     public ReplicationPeerConfig getPeerConfig() {
       return replicationPeer.getPeerConfig();
     }
+
     public ReplicationPeer getReplicationPeer() {
       return replicationPeer;
     }
+
     public MetricsSource getMetrics() {
       return metrics;
     }
+
     public TableDescriptors getTableDescriptors() {
       return tableDescriptors;
     }
-    public Abortable getAbortable() { return abortable; }
+
+    public Abortable getAbortable() {
+      return abortable;
+    }
   }
 
   /**
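
With Context now carrying the Server, a custom endpoint can borrow the
server-wide async connection in init() instead of owning one. MyEndpoint below
is hypothetical; the real use appears in RegionReplicaReplicationEndpoint later
in this patch:

import java.io.IOException;

import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;

abstract class MyEndpoint extends HBaseReplicationEndpoint {

  private AsyncClusterConnection connection;

  @Override
  public void init(Context context) throws IOException {
    super.init(context);
    // the server owns this connection, so doStop() no longer needs to close it
    this.connection = context.getServer().getAsyncClusterConnection();
  }
}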
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index f7721e0..65cf9a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -19,67 +19,47 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.Callable;
+import java.util.Optional;
+import java.util.TreeMap;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.RetryingCallable;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
+import org.apache.hadoop.hbase.util.AtomicUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
-import org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
-import org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
-import org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
-import org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
-import org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
 import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
+import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
+import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
 
 /**
- * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint
- * which receives the WAL edits from the WAL, and sends the edits to replicas
- * of regions.
+ * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint which receives the WAL
+ * edits from the WAL, and sends the edits to replicas of regions.
  */
 @InterfaceAudience.Private
 public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
@@ -87,32 +67,55 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
   private static final Logger LOG = LoggerFactory.getLogger(RegionReplicaReplicationEndpoint.class);
 
   // Can be configured differently than hbase.client.retries.number
-  private static String CLIENT_RETRIES_NUMBER
-    = "hbase.region.replica.replication.client.retries.number";
+  private static String CLIENT_RETRIES_NUMBER =
+    "hbase.region.replica.replication.client.retries.number";
 
   private Configuration conf;
-  private ClusterConnection connection;
+  private AsyncClusterConnection connection;
   private TableDescriptors tableDescriptors;
 
-  // Reuse WALSplitter constructs as a WAL pipe
-  private PipelineController controller;
-  private RegionReplicaOutputSink outputSink;
-  private EntryBuffers entryBuffers;
+  private int numRetries;
+
+  private long operationTimeoutNs;
 
-  // Number of writer threads
-  private int numWriterThreads;
+  private LoadingCache<TableName, Optional<TableDescriptor>> tableDescriptorCache;
 
-  private int operationTimeout;
+  private Cache<TableName, TableName> disabledTableCache;
 
-  private ExecutorService pool;
+  private final RetryCounterFactory retryCounterFactory =
+    new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60000);
 
   @Override
   public void init(Context context) throws IOException {
     super.init(context);
-
-    this.conf = HBaseConfiguration.create(context.getConfiguration());
+    this.conf = context.getConfiguration();
     this.tableDescriptors = context.getTableDescriptors();
-
+    int memstoreReplicationEnabledCacheExpiryMs = conf
+      .getInt("hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000);
+    // A cache for the table "memstore replication enabled" flag.
+    // It has a default expiry of 5 sec. This means that if the table is altered
+    // with a different flag value, we might fail to replicate for that amount of
+    // time. But this cache avoids the slow lookup and parsing of the TableDescriptor.
+    tableDescriptorCache = CacheBuilder.newBuilder()
+      .expireAfterWrite(memstoreReplicationEnabledCacheExpiryMs, TimeUnit.MILLISECONDS)
+      .initialCapacity(10).maximumSize(1000)
+      .build(new CacheLoader<TableName, Optional<TableDescriptor>>() {
+
+        @Override
+        public Optional<TableDescriptor> load(TableName tableName) throws Exception {
+          // check if the table requires memstore replication
+          // some unit tests drop the table, so we should do a bypass check and always replicate.
+          return Optional.ofNullable(tableDescriptors.get(tableName));
+        }
+      });
+    int nonExistentTableCacheExpiryMs =
+      conf.getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
+    // A cache for non-existent tables, with a default expiry of 5 sec. This means that if the
+    // table is created again with the same name, we might fail to replicate for that amount of
+    // time. But this cache prevents overloading meta with a request for every edit read from a
+    // deleted table's WAL file.
+    disabledTableCache = CacheBuilder.newBuilder()
+      .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS).initialCapacity(10)
+      .maximumSize(1000).build();
     // HRS multiplies client retries by 10 globally for meta operations, but we do not want this.
     // We are resetting it here because we want default number of retries (35) rather than 10 times
     // that which makes very long retries for disabled tables etc.
@@ -123,516 +126,261 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
         HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
       defaultNumRetries = defaultNumRetries / mult; // reset if HRS has multiplied this already
     }
-
-    conf.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 1);
-    int numRetries = conf.getInt(CLIENT_RETRIES_NUMBER, defaultNumRetries);
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries);
-
-    this.numWriterThreads = this.conf.getInt(
-      "hbase.region.replica.replication.writer.threads", 3);
-    controller = new PipelineController();
-    entryBuffers = new EntryBuffers(controller,
-        this.conf.getLong("hbase.region.replica.replication.buffersize", 128 * 1024 * 1024));
-
+    this.numRetries = conf.getInt(CLIENT_RETRIES_NUMBER, defaultNumRetries);
     // use the regular RPC timeout for replica replication RPC's
-    this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-  }
-
-  @Override
-  protected void doStart() {
-    try {
-      connection = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
-      this.pool = getDefaultThreadPool(conf);
-      outputSink = new RegionReplicaOutputSink(controller, tableDescriptors, entryBuffers,
-        connection, pool, numWriterThreads, operationTimeout);
-      outputSink.startWriterThreads();
-      super.doStart();
-    } catch (IOException ex) {
-      LOG.warn("Received exception while creating connection :" + ex);
-      notifyFailed(ex);
-    }
-  }
-
-  @Override
-  protected void doStop() {
-    if (outputSink != null) {
-      try {
-        outputSink.finishWritingAndClose();
-      } catch (IOException ex) {
-        LOG.warn("Got exception while trying to close OutputSink", ex);
-      }
-    }
-    if (this.pool != null) {
-      this.pool.shutdownNow();
-      try {
-        // wait for 10 sec
-        boolean shutdown = this.pool.awaitTermination(10000, TimeUnit.MILLISECONDS);
-        if (!shutdown) {
-          LOG.warn("Failed to shutdown the thread pool after 10 seconds");
-        }
-      } catch (InterruptedException e) {
-        LOG.warn("Got interrupted while waiting for the thread pool to shut down" + e);
-      }
-    }
-    if (connection != null) {
-      try {
-        connection.close();
-      } catch (IOException ex) {
-        LOG.warn("Got exception closing connection :" + ex);
-      }
-    }
-    super.doStop();
+    this.operationTimeoutNs =
+      TimeUnit.MILLISECONDS.toNanos(conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
+    this.connection = context.getServer().getAsyncClusterConnection();
   }
 
   /**
-   * Returns a Thread pool for the RPC's to region replicas. Similar to
-   * Connection's thread pool.
+   * Returns true if the specified entry must be replicated. We should always replicate meta
+   * operations (e.g. flush) and use the user HTD flag to decide whether or not to replicate
+   * the memstore.
    */
-  private ExecutorService getDefaultThreadPool(Configuration conf) {
-    int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
-    if (maxThreads == 0) {
-      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
+  private boolean requiresReplication(Optional<TableDescriptor> tableDesc, Entry entry) {
+    // empty edit does not need to be replicated
+    if (entry.getEdit().isEmpty() || !tableDesc.isPresent()) {
+      return false;
     }
-    long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
-    LinkedBlockingQueue<Runnable> workQueue =
-        new LinkedBlockingQueue<>(maxThreads *
-            conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-              HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
-      maxThreads,
-      maxThreads,
-      keepAliveTime,
-      TimeUnit.SECONDS,
-      workQueue,
-      Threads.newDaemonThreadFactory(this.getClass().getSimpleName() + "-rpc-shared-"));
-    tpe.allowCoreThreadTimeOut(true);
-    return tpe;
+    // meta edits (e.g. flush) must always be replicated
+    return entry.getEdit().isMetaEdit() || tableDesc.get().hasRegionMemStoreReplication();
   }
 
-  @Override
-  public boolean replicate(ReplicateContext replicateContext) {
-    /* A note on batching in RegionReplicaReplicationEndpoint (RRRE):
-     *
-     * RRRE relies on batching from two different mechanisms. The first is the batching from
-     * ReplicationSource since RRRE is a ReplicationEndpoint driven by RS. RS reads from a single
-     * WAL file filling up a buffer of heap size "replication.source.size.capacity"(64MB) or at most
-     * "replication.source.nb.capacity" entries or until it sees the end of file (in live tailing).
-     * Then RS passes all the buffered edits in this replicate() call context. RRRE puts the edits
-     * to the WALSplitter.EntryBuffers which is a blocking buffer space of up to
-     * "hbase.region.replica.replication.buffersize" (128MB) in size. This buffer splits the edits
-     * based on regions.
-     *
-     * There are "hbase.region.replica.replication.writer.threads"(default 3) writer threads which
-     * pick largest per-region buffer and send it to the SinkWriter (see RegionReplicaOutputSink).
-     * The SinkWriter in this case will send the wal edits to all secondary region replicas in
-     * parallel via a retrying rpc call. EntryBuffers guarantees that while a buffer is
-     * being written to the sink, another buffer for the same region will not be made available to
-     * writers ensuring regions edits are not replayed out of order.
-     *
-     * The replicate() call won't return until all the buffers are sent and ack'd by the sinks so
-     * that the replication can assume all edits are persisted. We may be able to do a better
-     * pipelining between the replication thread and output sinks later if it becomes a bottleneck.
-     */
-
-    while (this.isRunning()) {
-      try {
-        for (Entry entry: replicateContext.getEntries()) {
-          entryBuffers.appendEntry(entry);
+  private void getRegionLocations(CompletableFuture<RegionLocations> future,
+      TableDescriptor tableDesc, byte[] encodedRegionName, byte[] row, boolean reload) {
+    FutureUtils.addListener(connection.getRegionLocations(tableDesc.getTableName(), row, reload),
+      (r, e) -> {
+        if (e != null) {
+          future.completeExceptionally(e);
+          return;
         }
-        outputSink.flush(); // make sure everything is flushed
-        ctx.getMetrics().incrLogEditsFiltered(
-          outputSink.getSkippedEditsCounter().getAndSet(0));
-        return true;
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        return false;
-      } catch (IOException e) {
-        LOG.warn("Received IOException while trying to replicate"
-            + StringUtils.stringifyException(e));
-      }
-    }
-
-    return false;
-  }
-
-  @Override
-  public boolean canReplicateToSameCluster() {
-    return true;
-  }
-
-  @Override
-  protected WALEntryFilter getScopeWALEntryFilter() {
-    // we do not care about scope. We replicate everything.
-    return null;
+        // if we are not loading from cache, just return
+        if (reload) {
+          future.complete(r);
+          return;
+        }
+        // check if the number of region replicas is correct, and also the primary region name
+        // matches
+        if (r.size() == tableDesc.getRegionReplication() && Bytes.equals(
+          r.getDefaultRegionLocation().getRegion().getEncodedNameAsBytes(), encodedRegionName)) {
+          future.complete(r);
+        } else {
+          // reload again as the information in the cache may be stale
+          getRegionLocations(future, tableDesc, encodedRegionName, row, true);
+        }
+      });
   }
 
-  static class RegionReplicaOutputSink extends OutputSink {
-    private final RegionReplicaSinkWriter sinkWriter;
-    private final TableDescriptors tableDescriptors;
-    private final Cache<TableName, Boolean> memstoreReplicationEnabled;
-
-    public RegionReplicaOutputSink(PipelineController controller, TableDescriptors tableDescriptors,
-        EntryBuffers entryBuffers, ClusterConnection connection, ExecutorService pool,
-        int numWriters, int operationTimeout) {
-      super(controller, entryBuffers, numWriters);
-      this.sinkWriter =
-          new RegionReplicaSinkWriter(this, connection, pool, operationTimeout, tableDescriptors);
-      this.tableDescriptors = tableDescriptors;
-
-      // A cache for the table "memstore replication enabled" flag.
-      // It has a default expiry of 5 sec. This means that if the table is altered
-      // with a different flag value, we might miss to replicate for that amount of
-      // time. But this cache avoid the slow lookup and parsing of the TableDescriptor.
-      int memstoreReplicationEnabledCacheExpiryMs = connection.getConfiguration()
-        .getInt("hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000);
-      this.memstoreReplicationEnabled = CacheBuilder.newBuilder()
-        .expireAfterWrite(memstoreReplicationEnabledCacheExpiryMs, TimeUnit.MILLISECONDS)
-        .initialCapacity(10)
-        .maximumSize(1000)
-        .build();
+  private void replicate(CompletableFuture<Long> future, RegionLocations locs,
+      TableDescriptor tableDesc, byte[] encodedRegionName, byte[] row, List<Entry> entries) {
+    if (locs.size() == 1) {
+      // Only the primary location is present, so there are no replicas to replicate to.
+      future.complete(Long.valueOf(entries.size()));
+      return;
     }
-
-    @Override
-    public void append(RegionEntryBuffer buffer) throws IOException {
-      List<Entry> entries = buffer.getEntryBuffer();
-
-      if (entries.isEmpty() || entries.get(0).getEdit().getCells().isEmpty()) {
-        return;
-      }
-
-      // meta edits (e.g. flush) are always replicated.
-      // data edits (e.g. put) are replicated if the table requires them.
-      if (!requiresReplication(buffer.getTableName(), entries)) {
-        return;
+    if (!Bytes.equals(locs.getDefaultRegionLocation().getRegion().getEncodedNameAsBytes(),
+      encodedRegionName)) {
+      // the region names do not match; this usually means the region has been split or merged, so
+      // give up replicating as the new region(s) should already have all the data of the parent
+      // region(s).
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+          "Skipping {} entries in table {} because located region {} is different than" +
+            " the original region {} from WALEdit",
+          entries.size(), tableDesc.getTableName(),
+          locs.getDefaultRegionLocation().getRegion().getEncodedName(),
+          Bytes.toStringBinary(encodedRegionName));
       }
-
-      sinkWriter.append(buffer.getTableName(), buffer.getEncodedRegionName(),
-        CellUtil.cloneRow(entries.get(0).getEdit().getCells().get(0)), entries);
-    }
-
-    @Override
-    public boolean flush() throws IOException {
-      // nothing much to do for now. Wait for the Writer threads to finish up
-      // append()'ing the data.
-      entryBuffers.waitUntilDrained();
-      return super.flush();
-    }
-
-    @Override
-    public boolean keepRegionEvent(Entry entry) {
-      return true;
-    }
-
-    @Override
-    public List<Path> finishWritingAndClose() throws IOException {
-      finishWriting(true);
-      return null;
-    }
-
-    @Override
-    public Map<byte[], Long> getOutputCounts() {
-      return null; // only used in tests
-    }
-
-    @Override
-    public int getNumberOfRecoveredRegions() {
-      return 0;
-    }
-
-    AtomicLong getSkippedEditsCounter() {
-      return skippedEdits;
+      future.complete(Long.valueOf(entries.size()));
+      return;
     }
-
-    /**
-     * returns true if the specified entry must be replicated.
-     * We should always replicate meta operations (e.g. flush)
-     * and use the user HTD flag to decide whether or not replicate the memstore.
-     */
-    private boolean requiresReplication(final TableName tableName, final List<Entry> entries)
-        throws IOException {
-      // unit-tests may not the TableDescriptors, bypass the check and always replicate
-      if (tableDescriptors == null) return true;
-
-      Boolean requiresReplication = memstoreReplicationEnabled.getIfPresent(tableName);
-      if (requiresReplication == null) {
-        // check if the table requires memstore replication
-        // some unit-test drop the table, so we should do a bypass check and always replicate.
-        TableDescriptor htd = tableDescriptors.get(tableName);
-        requiresReplication = htd == null || htd.hasRegionMemStoreReplication();
-        memstoreReplicationEnabled.put(tableName, requiresReplication);
-      }
-
-      // if memstore replication is not required, check the entries.
-      // meta edits (e.g. flush) must be always replicated.
-      if (!requiresReplication) {
-        int skipEdits = 0;
-        java.util.Iterator<Entry> it = entries.iterator();
-        while (it.hasNext()) {
-          Entry entry = it.next();
-          if (entry.getEdit().isMetaEdit()) {
-            requiresReplication = true;
+    AtomicReference<Throwable> error = new AtomicReference<>();
+    AtomicInteger remainingTasks = new AtomicInteger(locs.size() - 1);
+    AtomicLong skippedEdits = new AtomicLong(0);
+
+    for (int i = 1, n = locs.size(); i < n; i++) {
+      final int replicaId = i;
+      FutureUtils.addListener(connection.replay(tableDesc.getTableName(),
+        locs.getRegionLocation(replicaId).getRegion().getEncodedNameAsBytes(), row, entries,
+        replicaId, numRetries, operationTimeoutNs), (r, e) -> {
+          if (e != null) {
+            LOG.warn("Failed to replicate to {}", locs.getRegionLocation(replicaId), e);
+            error.compareAndSet(null, e);
           } else {
-            it.remove();
-            skipEdits++;
+            AtomicUtils.updateMax(skippedEdits, r.longValue());
           }
-        }
-        skippedEdits.addAndGet(skipEdits);
-      }
-      return requiresReplication;
+          if (remainingTasks.decrementAndGet() == 0) {
+            if (error.get() != null) {
+              future.completeExceptionally(error.get());
+            } else {
+              future.complete(skippedEdits.get());
+            }
+          }
+        });
     }
   }
 
-  static class RegionReplicaSinkWriter extends SinkWriter {
-    RegionReplicaOutputSink sink;
-    ClusterConnection connection;
-    RpcControllerFactory rpcControllerFactory;
-    RpcRetryingCallerFactory rpcRetryingCallerFactory;
-    int operationTimeout;
-    ExecutorService pool;
-    Cache<TableName, Boolean> disabledAndDroppedTables;
-    TableDescriptors tableDescriptors;
-
-    public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection,
-        ExecutorService pool, int operationTimeout, TableDescriptors tableDescriptors) {
-      this.sink = sink;
-      this.connection = connection;
-      this.operationTimeout = operationTimeout;
-      this.rpcRetryingCallerFactory
-        = RpcRetryingCallerFactory.instantiate(connection.getConfiguration());
-      this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration());
-      this.pool = pool;
-      this.tableDescriptors = tableDescriptors;
-
-      int nonExistentTableCacheExpiryMs = connection.getConfiguration()
-        .getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
-      // A cache for non existing tables that have a default expiry of 5 sec. This means that if the
-      // table is created again with the same name, we might miss to replicate for that amount of
-      // time. But this cache prevents overloading meta requests for every edit from a deleted file.
-      disabledAndDroppedTables = CacheBuilder.newBuilder()
-        .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS)
-        .initialCapacity(10)
-        .maximumSize(1000)
-        .build();
+  private void logSkipped(TableName tableName, List<Entry> entries, String reason) {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Skipping {} entries because table {} is {}", entries.size(), tableName, reason);
+      for (Entry entry : entries) {
+        LOG.trace("Skipping : {}", entry);
+      }
     }
+  }
 
-    public void append(TableName tableName, byte[] encodedRegionName, byte[] row,
-        List<Entry> entries) throws IOException {
-
-      if (disabledAndDroppedTables.getIfPresent(tableName) != null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Skipping " + entries.size() + " entries because table " + tableName
-            + " is cached as a disabled or dropped table");
-          for (Entry entry : entries) {
-            LOG.trace("Skipping : " + entry);
-          }
-        }
-        sink.getSkippedEditsCounter().addAndGet(entries.size());
-        return;
+  private CompletableFuture<Long> replicate(TableDescriptor tableDesc, byte[] encodedRegionName,
+      List<Entry> entries) {
+    if (disabledTableCache.getIfPresent(tableDesc.getTableName()) != null) {
+      logSkipped(tableDesc.getTableName(), entries, "cached as a disabled table");
+      return CompletableFuture.completedFuture(Long.valueOf(entries.size()));
+    }
+    byte[] row = CellUtil.cloneRow(entries.get(0).getEdit().getCells().get(0));
+    CompletableFuture<RegionLocations> locateFuture = new CompletableFuture<>();
+    getRegionLocations(locateFuture, tableDesc, encodedRegionName, row, false);
+    CompletableFuture<Long> future = new CompletableFuture<>();
+    FutureUtils.addListener(locateFuture, (locs, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+      } else {
+        replicate(future, locs, tableDesc, encodedRegionName, row, entries);
       }
+    });
+    return future;
+  }
 
-      // If the table is disabled or dropped, we should not replay the entries, and we can skip
-      // replaying them. However, we might not know whether the table is disabled until we
-      // invalidate the cache and check from meta
-      RegionLocations locations = null;
-      boolean useCache = true;
-      while (true) {
-        // get the replicas of the primary region
+  @Override
+  public boolean replicate(ReplicateContext replicateContext) {
+    Map<byte[], Pair<TableDescriptor, List<Entry>>> encodedRegionName2Entries =
+      new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    long skippedEdits = 0;
+    RetryCounter retryCounter = retryCounterFactory.create();
+    outer: while (isRunning()) {
+      encodedRegionName2Entries.clear();
+      skippedEdits = 0;
+      for (Entry entry : replicateContext.getEntries()) {
+        Optional<TableDescriptor> tableDesc;
         try {
-          locations = RegionReplicaReplayCallable
-              .getRegionLocations(connection, tableName, row, useCache, 0);
-
-          if (locations == null) {
-            throw new HBaseIOException("Cannot locate locations for "
-                + tableName + ", row:" + Bytes.toStringBinary(row));
+          tableDesc = tableDescriptorCache.get(entry.getKey().getTableName());
+        } catch (ExecutionException e) {
+          LOG.warn("Failed to load table descriptor for {}, attempts={}",
+            entry.getKey().getTableName(), retryCounter.getAttemptTimes(), e.getCause());
+          if (!retryCounter.shouldRetry()) {
+            return false;
           }
-        } catch (TableNotFoundException e) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("Skipping " + entries.size() + " entries because table " + tableName
-              + " is dropped. Adding table to cache.");
-            for (Entry entry : entries) {
-              LOG.trace("Skipping : " + entry);
-            }
+          try {
+            retryCounter.sleepUntilNextRetry();
+          } catch (InterruptedException e1) {
+            // restore the interrupted state
+            Thread.currentThread().interrupt();
+            return false;
           }
-          disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache. Value ignored
-          // skip this entry
-          sink.getSkippedEditsCounter().addAndGet(entries.size());
-          return;
+          continue outer;
         }
-
-        // check whether we should still replay this entry. If the regions are changed, or the
-        // entry is not coming from the primary region, filter it out.
-        HRegionLocation primaryLocation = locations.getDefaultRegionLocation();
-        if (!Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(),
-          encodedRegionName)) {
-          if (useCache) {
-            useCache = false;
-            continue; // this will retry location lookup
-          }
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("Skipping " + entries.size() + " entries in table " + tableName
-              + " because located region " + primaryLocation.getRegionInfo().getEncodedName()
-              + " is different than the original region " + Bytes.toStringBinary(encodedRegionName)
-              + " from WALEdit");
-            for (Entry entry : entries) {
-              LOG.trace("Skipping : " + entry);
-            }
-          }
-          sink.getSkippedEditsCounter().addAndGet(entries.size());
-          return;
+        if (!requiresReplication(tableDesc, entry)) {
+          skippedEdits++;
+          continue;
         }
-        break;
+        byte[] encodedRegionName = entry.getKey().getEncodedRegionName();
+        encodedRegionName2Entries
+          .computeIfAbsent(encodedRegionName, k -> Pair.newPair(tableDesc.get(), new ArrayList<>()))
+          .getSecond().add(entry);
       }
-
-      if (locations.size() == 1) {
-        return;
-      }
-
-      ArrayList<Future<ReplicateWALEntryResponse>> tasks = new ArrayList<>(locations.size() - 1);
-
-      // All passed entries should belong to one region because it is coming from the EntryBuffers
-      // split per region. But the regions might split and merge (unlike log recovery case).
-      for (int replicaId = 0; replicaId < locations.size(); replicaId++) {
-        HRegionLocation location = locations.getRegionLocation(replicaId);
-        if (!RegionReplicaUtil.isDefaultReplica(replicaId)) {
-          RegionInfo regionInfo = location == null
-              ? RegionReplicaUtil.getRegionInfoForReplica(
-                locations.getDefaultRegionLocation().getRegionInfo(), replicaId)
-              : location.getRegionInfo();
-          RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection,
-            rpcControllerFactory, tableName, location, regionInfo, row, entries,
-            sink.getSkippedEditsCounter());
-           Future<ReplicateWALEntryResponse> task = pool.submit(
-             new RetryingRpcCallable<>(rpcRetryingCallerFactory, callable, operationTimeout));
-           tasks.add(task);
-        }
+      break;
+    }
+    // send the request to regions
+    retryCounter = retryCounterFactory.create();
+    while (isRunning()) {
+      List<Pair<CompletableFuture<Long>, byte[]>> futureAndEncodedRegionNameList =
+        new ArrayList<Pair<CompletableFuture<Long>, byte[]>>();
+      for (Map.Entry<byte[], Pair<TableDescriptor, List<Entry>>> entry : encodedRegionName2Entries
+        .entrySet()) {
+        CompletableFuture<Long> future =
+          replicate(entry.getValue().getFirst(), entry.getKey(), entry.getValue().getSecond());
+        futureAndEncodedRegionNameList.add(Pair.newPair(future, entry.getKey()));
       }
-
-      boolean tasksCancelled = false;
-      for (int replicaId = 0; replicaId < tasks.size(); replicaId++) {
+      for (Pair<CompletableFuture<Long>, byte[]> pair : futureAndEncodedRegionNameList) {
+        byte[] encodedRegionName = pair.getSecond();
         try {
-          tasks.get(replicaId).get();
+          skippedEdits += pair.getFirst().get().longValue();
+          encodedRegionName2Entries.remove(encodedRegionName);
         } catch (InterruptedException e) {
-          throw new InterruptedIOException(e.getMessage());
+          // restore the interrupted state
+          Thread.currentThread().interrupt();
+          return false;
         } catch (ExecutionException e) {
+          Pair<TableDescriptor, List<Entry>> tableAndEntries =
+            encodedRegionName2Entries.get(encodedRegionName);
+          TableName tableName = tableAndEntries.getFirst().getTableName();
+          List<Entry> entries = tableAndEntries.getSecond();
           Throwable cause = e.getCause();
-          boolean canBeSkipped = false;
-          if (cause instanceof IOException) {
-            // The table can be disabled or dropped at this time. For disabled tables, we have no
-            // cheap mechanism to detect this case because meta does not contain this information.
-            // ClusterConnection.isTableDisabled() is a zk call which we cannot do for every replay
-            // RPC. So instead we start the replay RPC with retries and check whether the table is
-            // dropped or disabled which might cause SocketTimeoutException, or
-            // RetriesExhaustedException or similar if we get IOE.
-            if (cause instanceof TableNotFoundException
-                || connection.isTableDisabled(tableName)) {
-              disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later.
-              canBeSkipped = true;
-            } else if (tableDescriptors != null) {
-              TableDescriptor tableDescriptor = tableDescriptors.get(tableName);
-              if (tableDescriptor != null
-                  //(replicaId + 1) as no task is added for primary replica for replication
-                  && tableDescriptor.getRegionReplication() <= (replicaId + 1)) {
-                canBeSkipped = true;
-              }
-            }
-            if (canBeSkipped) {
-              if (LOG.isTraceEnabled()) {
-                LOG.trace("Skipping " + entries.size() + " entries in table " + tableName
-                    + " because received exception for dropped or disabled table",
-                  cause);
-                for (Entry entry : entries) {
-                  LOG.trace("Skipping : " + entry);
-                }
-              }
-              if (!tasksCancelled) {
-                sink.getSkippedEditsCounter().addAndGet(entries.size());
-                tasksCancelled = true; // so that we do not add to skipped counter again
-              }
-              continue;
-            }
-
-            // otherwise rethrow
-            throw (IOException)cause;
+          // The table can be disabled or dropped at this time. For disabled tables, we have no
+          // cheap mechanism to detect this case because meta does not contain this information.
+          // ClusterConnection.isTableDisabled() is a zk call which we cannot do for every replay
+          // RPC. So instead we start the replay RPC with retries and check whether the table is
+          // dropped or disabled which might cause SocketTimeoutException, or
+          // RetriesExhaustedException or similar if we get IOE.
+          if (cause instanceof TableNotFoundException) {
+            // add to cache that the table does not exist
+            tableDescriptorCache.put(tableName, Optional.empty());
+            logSkipped(tableName, entries, "dropped");
+            skippedEdits += entries.size();
+            encodedRegionName2Entries.remove(encodedRegionName);
+            continue;
+          }
+          boolean disabled = false;
+          try {
+            disabled = connection.getAdmin().isTableDisabled(tableName).get();
+          } catch (InterruptedException e1) {
+            // restore the interrupted state
+            Thread.currentThread().interrupt();
+            return false;
+          } catch (ExecutionException e1) {
+            LOG.warn("Failed to test whether {} is disabled, assume it is not disabled", tableName,
+              e1.getCause());
+          }
+          if (disabled) {
+            disabledTableCache.put(tableName, tableName);
+            logSkipped(tableName, entries, "disabled");
+            skippedEdits += entries.size();
+            encodedRegionName2Entries.remove(encodedRegionName);
+            continue;
           }
-          // unexpected exception
-          throw new IOException(cause);
+          LOG.warn("Failed to replicate {} entries for region {} of table {}", entries.size(),
+            Bytes.toStringBinary(encodedRegionName), tableName);
+        }
+      }
+      // we are done
+      if (encodedRegionName2Entries.isEmpty()) {
+        ctx.getMetrics().incrLogEditsFiltered(skippedEdits);
+        return true;
+      } else {
+        LOG.warn("Failed to replicate all entris, retry={}", retryCounter.getAttemptTimes());
+        if (!retryCounter.shouldRetry()) {
+          return false;
+        }
+        try {
+          retryCounter.sleepUntilNextRetry();
+        } catch (InterruptedException e) {
+          // restore the interrupted state
+          Thread.currentThread().interrupt();
+          return false;
         }
       }
     }
-  }
 
-  static class RetryingRpcCallable<V> implements Callable<V> {
-    RpcRetryingCallerFactory factory;
-    RetryingCallable<V> callable;
-    int timeout;
-    public RetryingRpcCallable(RpcRetryingCallerFactory factory, RetryingCallable<V> callable,
-        int timeout) {
-      this.factory = factory;
-      this.callable = callable;
-      this.timeout = timeout;
-    }
-    @Override
-    public V call() throws Exception {
-      return factory.<V>newCaller().callWithRetries(callable, timeout);
-    }
+    return false;
   }
 
-  /**
-   * Calls replay on the passed edits for the given set of entries belonging to the region. It skips
-   * the entry if the region boundaries have changed or the region is gone.
-   */
-  static class RegionReplicaReplayCallable extends
-      RegionAdminServiceCallable<ReplicateWALEntryResponse> {
-    private final List<Entry> entries;
-    private final byte[] initialEncodedRegionName;
-    private final AtomicLong skippedEntries;
-
-    public RegionReplicaReplayCallable(ClusterConnection connection,
-        RpcControllerFactory rpcControllerFactory, TableName tableName,
-        HRegionLocation location, RegionInfo regionInfo, byte[] row,List<Entry> entries,
-        AtomicLong skippedEntries) {
-      super(connection, rpcControllerFactory, location, tableName, row, regionInfo.getReplicaId());
-      this.entries = entries;
-      this.skippedEntries = skippedEntries;
-      this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes();
-    }
-
-    @Override
-    public ReplicateWALEntryResponse call(HBaseRpcController controller) throws Exception {
-      // Check whether we should still replay this entry. If the regions are changed, or the
-      // entry is not coming form the primary region, filter it out because we do not need it.
-      // Regions can change because of (1) region split (2) region merge (3) table recreated
-      boolean skip = false;
-      if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(),
-          initialEncodedRegionName)) {
-        skip = true;
-      }
-      if (!this.entries.isEmpty() && !skip) {
-        Entry[] entriesArray = new Entry[this.entries.size()];
-        entriesArray = this.entries.toArray(entriesArray);
-
-        // set the region name for the target region replica
-        Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
-            ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray, location
-                .getRegionInfo().getEncodedNameAsBytes(), null, null, null);
-        controller.setCellScanner(p.getSecond());
-        return stub.replay(controller, p.getFirst());
-      }
+  @Override
+  public boolean canReplicateToSameCluster() {
+    return true;
+  }
 
-      if (skip) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Skipping " + entries.size() + " entries in table " + tableName
-            + " because located region " + location.getRegionInfo().getEncodedName()
-            + " is different than the original region "
-            + Bytes.toStringBinary(initialEncodedRegionName) + " from WALEdit");
-          for (Entry entry : entries) {
-            LOG.trace("Skipping : " + entry);
-          }
-        }
-        skippedEntries.addAndGet(entries.size());
-      }
-      return ReplicateWALEntryResponse.newBuilder().build();
-    }
+  @Override
+  protected WALEntryFilter getScopeWALEntryFilter() {
+    // we do not care about scope. We replicate everything.
+    return null;
   }
 }
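
A note on the pattern above: the rewritten endpoint drives each replay through CompletableFuture chains instead of a pool of RetryingCallables. Below is a minimal, self-contained sketch of the listener-chaining idiom used by replicate(...); the addListener helper is a simplified stand-in for o.a.h.hbase.util.FutureUtils.addListener, and locatePrimary/replay are hypothetical placeholders for the location lookup and the replay RPC, not the real API.

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class AddListenerSketch {

  // Simplified stand-in for FutureUtils.addListener: invoke the callback when
  // the source future completes, either normally or exceptionally.
  static <T> void addListener(CompletableFuture<T> future, BiConsumer<T, Throwable> listener) {
    future.whenComplete(listener::accept);
  }

  // Hypothetical placeholder for the region location lookup.
  static CompletableFuture<String> locatePrimary(byte[] row) {
    return CompletableFuture.completedFuture("region-location");
  }

  // Hypothetical placeholder for the replay RPC; the Long is the skipped-edit count.
  static CompletableFuture<Long> replay(String location) {
    return CompletableFuture.completedFuture(0L);
  }

  // Chain locate -> replay into one future, mirroring the shape of replicate(...).
  static CompletableFuture<Long> replicate(byte[] row) {
    CompletableFuture<Long> future = new CompletableFuture<>();
    addListener(locatePrimary(row), (loc, error) -> {
      if (error != null) {
        future.completeExceptionally(error); // propagate the locate failure
      } else {
        addListener(replay(loc), (skipped, err) -> {
          if (err != null) {
            future.completeExceptionally(err);
          } else {
            future.complete(skipped);
          }
        });
      }
    });
    return future;
  }
}
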
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f1b6e76..c453e21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -282,7 +282,7 @@ public class ReplicationSource implements ReplicationSourceInterface {
       tableDescriptors = ((HRegionServer) server).getTableDescriptors();
     }
     replicationEndpoint
-      .init(new ReplicationEndpoint.Context(conf, replicationPeer.getConfiguration(), fs,
+      .init(new ReplicationEndpoint.Context(server, conf, replicationPeer.getConfiguration(), fs,
         replicationPeer.getId(), clusterId, replicationPeer, metrics, tableDescriptors, server));
     replicationEndpoint.start();
     replicationEndpoint.awaitRunning(waitOnEndpointSeconds, TimeUnit.SECONDS);
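
With the Server handle now passed into ReplicationEndpoint.Context, an endpoint can reach server-side services from init(). A hedged sketch of that usage follows; Context#getServer() is assumed from the mocked tests further down, and the class is kept abstract so only the relevant override is shown.

import java.io.IOException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;

public abstract class ServerAwareEndpoint extends BaseReplicationEndpoint {

  private Server server;

  @Override
  public void init(Context context) throws IOException {
    super.init(context);
    // new in this change: the hosting Server travels on the Context
    this.server = context.getServer();
  }
}
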
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java
index e1e55f5..9af60c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java
@@ -88,7 +88,7 @@ public class TestAsyncTableNoncedRetry {
       registry.getClusterId().get(), null, User.getCurrent()) {
 
       @Override
-      public NonceGenerator getNonceGenerator() {
+      NonceGenerator getNonceGenerator() {
         return NONCE_GENERATOR;
       }
     };
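
Note that getNonceGenerator() drops from public to package-private here; the anonymous-subclass override above still compiles only because the test lives in the same package as the connection class. A generic sketch of that test-seam pattern (Worker and nextNonce are illustrative names, not the real client API):

// Production class and test in the same package: the package-private hook is
// overridable there but invisible to outside callers.
class Worker {
  // package-private seam, deliberately not part of the public API
  long nextNonce() {
    return System.nanoTime();
  }
}

class WorkerTest {
  // pin the nonce to a constant so the test is deterministic
  Worker deterministic = new Worker() {
    @Override
    long nextNonce() {
      return 42L;
    }
  };
}
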
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
index 04db81a..017d7c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
@@ -20,16 +20,17 @@ package org.apache.hadoop.hbase.replication.regionserver;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicLong;
+import java.util.UUID;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.CellBuilderFactory;
@@ -42,7 +43,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -51,12 +51,12 @@ import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -383,9 +383,8 @@ public class TestRegionReplicaReplicationEndpoint {
     testRegionReplicaReplicationIgnores(false, true);
   }
 
-  public void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disableReplication)
+  private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disableReplication)
       throws Exception {
-
     // tests having edits from a disabled or dropped table is handled correctly by skipping those
     // entries and further edits after the edits from dropped/disabled table can be replicated
     // without problems.
@@ -405,8 +404,7 @@ public class TestRegionReplicaReplicationEndpoint {
     HTU.getAdmin().createTable(htd);
 
     // both tables are created, now pause replication
-    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
-    admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
+    HTU.getAdmin().disableReplicationPeer(ServerRegionReplicaUtil.getReplicationPeerId());
 
     // now that the replication is disabled, write to the table to be dropped, then drop the table.
 
@@ -416,19 +414,9 @@ public class TestRegionReplicaReplicationEndpoint {
 
     HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);
 
-    AtomicLong skippedEdits = new AtomicLong();
-    RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink =
-        mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
-    when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
-    FSTableDescriptors fstd = new FSTableDescriptors(HTU.getConfiguration(),
-        FileSystem.get(HTU.getConfiguration()), HTU.getDefaultRootDirPath());
-    RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter =
-        new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink,
-            (ClusterConnection) connection, Executors.newSingleThreadExecutor(), Integer.MAX_VALUE,
-            fstd);
     RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
     HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
-    byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();
+    byte[] encodedRegionName = hrl.getRegion().getEncodedNameAsBytes();
 
     Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("A"))
         .setFamily(HTU.fam1).setValue(Bytes.toBytes("VAL")).setType(Type.Put).build();
@@ -436,7 +424,6 @@ public class TestRegionReplicaReplicationEndpoint {
       new WALKeyImpl(encodedRegionName, toBeDisabledTable, 1),
         new WALEdit()
             .add(cell));
-
     HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
     if (dropTable) {
       HTU.getAdmin().deleteTable(toBeDisabledTable);
@@ -445,11 +432,23 @@ public class TestRegionReplicaReplicationEndpoint {
       HTU.getAdmin().modifyTable(toBeDisabledTable, htd);
       HTU.getAdmin().enableTable(toBeDisabledTable);
     }
-    sinkWriter.append(toBeDisabledTable, encodedRegionName,
-      HConstants.EMPTY_BYTE_ARRAY, Lists.newArrayList(entry, entry));
-
-    assertEquals(2, skippedEdits.get());
 
+    HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(0);
+    MetricsSource metrics = mock(MetricsSource.class);
+    ReplicationEndpoint.Context ctx =
+      new ReplicationEndpoint.Context(rs, HTU.getConfiguration(), HTU.getConfiguration(),
+        HTU.getTestFileSystem(), ServerRegionReplicaUtil.getReplicationPeerId(),
+        UUID.fromString(rs.getClusterId()), rs.getReplicationSourceService().getReplicationPeers()
+          .getPeer(ServerRegionReplicaUtil.getReplicationPeerId()),
+        metrics, rs.getTableDescriptors(), rs);
+    RegionReplicaReplicationEndpoint rrpe = new RegionReplicaReplicationEndpoint();
+    rrpe.init(ctx);
+    rrpe.start();
+    ReplicationEndpoint.ReplicateContext repCtx = new ReplicationEndpoint.ReplicateContext();
+    repCtx.setEntries(Lists.newArrayList(entry, entry));
+    assertTrue(rrpe.replicate(repCtx));
+    verify(metrics, times(1)).incrLogEditsFiltered(eq(2L));
+    rrpe.stop();
     if (disableReplication) {
       // enable replication again so that we can verify replication
       HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
@@ -460,17 +459,14 @@ public class TestRegionReplicaReplicationEndpoint {
 
     try {
       // load some data to the to-be-dropped table
-
       // load the data to the table
       HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
 
       // now enable the replication
-      admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
+      HTU.getAdmin().enableReplicationPeer(ServerRegionReplicaUtil.getReplicationPeerId());
 
       verifyReplication(tableName, regionReplication, 0, 1000);
-
     } finally {
-      admin.close();
       table.close();
       rl.close();
       tableToBeDisabled.close();
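
Instead of reaching into a mocked SinkWriter, the test now runs the real endpoint and asserts the observable effect on a mocked MetricsSource. A generic Mockito sketch of that verify-the-counter style (Sink and process are illustrative; only the shape of the verify call mirrors the test above):

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import org.junit.Test;

public class CounterVerifyTest {

  interface Sink {
    void incrFiltered(long count);
  }

  // the unit under test reports skipped work through its sink
  private static void process(Sink sink) {
    sink.incrFiltered(2L);
  }

  @Test
  public void verifiesExactCounterBump() {
    Sink sink = mock(Sink.class);
    process(sink);
    // exactly one call, with exactly the expected count
    verify(sink, times(1)).incrFiltered(eq(2L));
  }
}
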
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
index ab67d94..de0f151 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
@@ -19,15 +19,16 @@ package org.apache.hadoop.hbase.replication.regionserver;
 
 import static org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster.closeRegion;
 import static org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster.openRegion;
-import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Optional;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -37,24 +38,22 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.WALCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.WALObserver;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext;
-import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
@@ -73,8 +72,6 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
-
 /**
  * Tests RegionReplicaReplicationEndpoint. Unlike TestRegionReplicaReplicationEndpoint this
  * class contains lower level tests using callables.
@@ -178,39 +175,34 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
   public void testReplayCallable() throws Exception {
     // tests replaying the edits to a secondary region replica using the Callable directly
     openRegion(HTU, rs0, hriSecondary);
-    ClusterConnection connection =
-        (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration());
 
-    //load some data to primary
+    // load some data to primary
     HTU.loadNumericRows(table, f, 0, 1000);
 
     Assert.assertEquals(1000, entries.size());
-    // replay the edits to the secondary using replay callable
-    replicateUsingCallable(connection, entries);
+    try (AsyncClusterConnection conn = ClusterConnectionFactory
+      .createAsyncClusterConnection(HTU.getConfiguration(), null, User.getCurrent())) {
+      // replay the edits to the secondary using replay callable
+      replicateUsingCallable(conn, entries);
+    }
 
     Region region = rs0.getRegion(hriSecondary.getEncodedName());
     HTU.verifyNumericRows(region, f, 0, 1000);
 
     HTU.deleteNumericRows(table, f, 0, 1000);
     closeRegion(HTU, rs0, hriSecondary);
-    connection.close();
   }
 
-  private void replicateUsingCallable(ClusterConnection connection, Queue<Entry> entries)
-      throws IOException, RuntimeException {
+  private void replicateUsingCallable(AsyncClusterConnection connection, Queue<Entry> entries)
+      throws IOException, ExecutionException, InterruptedException {
     Entry entry;
     while ((entry = entries.poll()) != null) {
       byte[] row = CellUtil.cloneRow(entry.getEdit().getCells().get(0));
-      RegionLocations locations = connection.locateRegion(tableName, row, true, true);
-      RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection,
-        RpcControllerFactory.instantiate(connection.getConfiguration()),
-        table.getName(), locations.getRegionLocation(1),
-        locations.getRegionLocation(1).getRegionInfo(), row, Lists.newArrayList(entry),
-        new AtomicLong());
-
-      RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(
-        connection.getConfiguration());
-      factory.<ReplicateWALEntryResponse> newCaller().callWithRetries(callable, 10000);
+      RegionLocations locations = connection.getRegionLocations(tableName, row, true).get();
+      connection
+        .replay(tableName, locations.getRegionLocation(1).getRegion().getEncodedNameAsBytes(), row,
+          Collections.singletonList(entry), 1, Integer.MAX_VALUE, TimeUnit.SECONDS.toNanos(10))
+        .get();
     }
   }
 
@@ -218,49 +210,49 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
   public void testReplayCallableWithRegionMove() throws Exception {
     // tests replaying the edits to a secondary region replica using the Callable directly while
     // the region is moved to another location.It tests handling of RME.
-    openRegion(HTU, rs0, hriSecondary);
-    ClusterConnection connection =
-        (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration());
-    //load some data to primary
-    HTU.loadNumericRows(table, f, 0, 1000);
+    try (AsyncClusterConnection conn = ClusterConnectionFactory
+      .createAsyncClusterConnection(HTU.getConfiguration(), null, User.getCurrent())) {
+      openRegion(HTU, rs0, hriSecondary);
+      // load some data to primary
+      HTU.loadNumericRows(table, f, 0, 1000);
 
-    Assert.assertEquals(1000, entries.size());
-    // replay the edits to the secondary using replay callable
-    replicateUsingCallable(connection, entries);
+      Assert.assertEquals(1000, entries.size());
 
-    Region region = rs0.getRegion(hriSecondary.getEncodedName());
-    HTU.verifyNumericRows(region, f, 0, 1000);
+      // replay the edits to the secondary using replay callable
+      replicateUsingCallable(conn, entries);
 
-    HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary
+      Region region = rs0.getRegion(hriSecondary.getEncodedName());
+      HTU.verifyNumericRows(region, f, 0, 1000);
 
-    // move the secondary region from RS0 to RS1
-    closeRegion(HTU, rs0, hriSecondary);
-    openRegion(HTU, rs1, hriSecondary);
+      HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary
 
-    // replicate the new data
-    replicateUsingCallable(connection, entries);
+      // move the secondary region from RS0 to RS1
+      closeRegion(HTU, rs0, hriSecondary);
+      openRegion(HTU, rs1, hriSecondary);
 
-    region = rs1.getRegion(hriSecondary.getEncodedName());
-    // verify the new data. old data may or may not be there
-    HTU.verifyNumericRows(region, f, 1000, 2000);
+      // replicate the new data
+      replicateUsingCallable(conn, entries);
 
-    HTU.deleteNumericRows(table, f, 0, 2000);
-    closeRegion(HTU, rs1, hriSecondary);
-    connection.close();
+      region = rs1.getRegion(hriSecondary.getEncodedName());
+      // verify the new data. old data may or may not be there
+      HTU.verifyNumericRows(region, f, 1000, 2000);
+
+      HTU.deleteNumericRows(table, f, 0, 2000);
+      closeRegion(HTU, rs1, hriSecondary);
+    }
   }
 
   @Test
   public void testRegionReplicaReplicationEndpointReplicate() throws Exception {
     // tests replaying the edits to a secondary region replica using the RRRE.replicate()
     openRegion(HTU, rs0, hriSecondary);
-    ClusterConnection connection =
-        (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration());
     RegionReplicaReplicationEndpoint replicator = new RegionReplicaReplicationEndpoint();
 
     ReplicationEndpoint.Context context = mock(ReplicationEndpoint.Context.class);
     when(context.getConfiguration()).thenReturn(HTU.getConfiguration());
     when(context.getMetrics()).thenReturn(mock(MetricsSource.class));
-
+    when(context.getServer()).thenReturn(rs0);
+    when(context.getTableDescriptors()).thenReturn(rs0.getTableDescriptors());
     replicator.init(context);
     replicator.startAsync();
 
@@ -272,12 +264,11 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
     final String fakeWalGroupId = "fakeWALGroup";
     replicator.replicate(new ReplicateContext().setEntries(Lists.newArrayList(entries))
         .setWalGroupId(fakeWalGroupId));
-
+    replicator.stop();
     Region region = rs0.getRegion(hriSecondary.getEncodedName());
     HTU.verifyNumericRows(region, f, 0, 1000);
 
     HTU.deleteNumericRows(table, f, 0, 1000);
     closeRegion(HTU, rs0, hriSecondary);
-    connection.close();
   }
 }
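
Both tests in this file now follow the same shape: create a throwaway AsyncClusterConnection, make the async calls, and let try-with-resources close it. A hedged sketch stitching together the pieces of replicateUsingCallable above; the replay arguments mirror the test (replica 1, effectively unbounded retries, 10s operation timeout) and are not recommended production settings.

import java.util.Collections;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.wal.WAL.Entry;

final class ReplayToReplicaSketch {

  static void replayOne(Configuration conf, TableName tableName, Entry entry) throws Exception {
    try (AsyncClusterConnection conn =
        ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent())) {
      byte[] row = CellUtil.cloneRow(entry.getEdit().getCells().get(0));
      // reload=true bypasses the location cache so a moved replica is found
      RegionLocations locs = conn.getRegionLocations(tableName, row, true).get();
      conn.replay(tableName, locs.getRegionLocation(1).getRegion().getEncodedNameAsBytes(), row,
        Collections.singletonList(entry), 1, Integer.MAX_VALUE, TimeUnit.SECONDS.toNanos(10))
        .get();
    }
  }
}
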


[hbase] 07/09: HBASE-21537 Rewrite ServerManager.closeRegionSilentlyAndWait to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 4d99866df852f072494357742e66b23f39f03ce1
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Jan 10 11:47:41 2019 +0800

    HBASE-21537 Rewrite ServerManager.closeRegionSilentlyAndWait to use AsyncClusterConnection
    
    Signed-off-by: Michael Stack <st...@apache.org>
---
 .../hadoop/hbase/master/MasterMetaBootstrap.java   |  2 +-
 .../apache/hadoop/hbase/master/ServerManager.java  | 41 +++++++++++++---------
 .../apache/hadoop/hbase/util/HBaseFsckRepair.java  | 22 +++++++-----
 3 files changed, 38 insertions(+), 27 deletions(-)
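
The ServerManager change below swaps the blocking AdminService stub for AsyncRegionServerAdmin futures and bridges back to its synchronous callers via FutureUtils.get. A simplified stand-in for that helper, showing the block-and-unwrap-to-IOException contract the new code appears to rely on (the real FutureUtils has richer interrupt and exception handling):

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public final class FutureUtilsSketch {

  private FutureUtilsSketch() {
  }

  // Block on the future and surface the failure cause as a checked
  // IOException, so async admin calls can back synchronous code such as
  // closeRegionSilentlyAndWait.
  public static <T> T get(CompletableFuture<T> future) throws IOException {
    try {
      return future.get();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupted state
      throw new IOException("Interrupted while waiting on future", e);
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException(cause);
    }
  }
}
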

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
index e57817e..6e38bdd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
@@ -101,7 +101,7 @@ class MasterMetaBootstrap {
           RegionState r = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
           LOG.info("Closing excess replica of meta region " + r.getRegion());
           // send a close and wait for a max of 30 seconds
-          ServerManager.closeRegionSilentlyAndWait(master.getClusterConnection(),
+          ServerManager.closeRegionSilentlyAndWait(master.getAsyncClusterConnection(),
               r.getServerName(), r.getRegion(), 30000);
           ZKUtil.deleteNode(zooKeeper, zooKeeper.getZNodePaths().getZNodeForReplica(replicaId));
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index a8d5e21..f98899e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -48,13 +48,15 @@ import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -67,6 +69,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
@@ -660,35 +663,39 @@ public class ServerManager {
   }
 
   /**
-   * Contacts a region server and waits up to timeout ms
-   * to close the region.  This bypasses the active hmaster.
+   * Contacts a region server and waits up to timeout ms to close the region. This bypasses the
+   * active hmaster.
    */
-  public static void closeRegionSilentlyAndWait(ClusterConnection connection,
-    ServerName server, RegionInfo region, long timeout) throws IOException, InterruptedException {
-    AdminService.BlockingInterface rs = connection.getAdmin(server);
-    HBaseRpcController controller = connection.getRpcControllerFactory().newController();
+  public static void closeRegionSilentlyAndWait(AsyncClusterConnection connection,
+      ServerName server, RegionInfo region, long timeout) throws IOException, InterruptedException {
+    AsyncRegionServerAdmin admin = connection.getRegionServerAdmin(server);
     try {
-      ProtobufUtil.closeRegion(controller, rs, server, region.getRegionName());
+      FutureUtils.get(
+        admin.closeRegion(ProtobufUtil.buildCloseRegionRequest(server, region.getRegionName())));
     } catch (IOException e) {
       LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
     }
     long expiration = timeout + System.currentTimeMillis();
     while (System.currentTimeMillis() < expiration) {
-      controller.reset();
       try {
-        RegionInfo rsRegion =
-          ProtobufUtil.getRegionInfo(controller, rs, region.getRegionName());
-        if (rsRegion == null) return;
+        RegionInfo rsRegion = ProtobufUtil.toRegionInfo(FutureUtils
+          .get(
+            admin.getRegionInfo(RequestConverter.buildGetRegionInfoRequest(region.getRegionName())))
+          .getRegionInfo());
+        if (rsRegion == null) {
+          return;
+        }
       } catch (IOException ioe) {
-        if (ioe instanceof NotServingRegionException) // no need to retry again
+        if (ioe instanceof NotServingRegionException) {
+          // no need to retry again
           return;
-        LOG.warn("Exception when retrieving regioninfo from: "
-          + region.getRegionNameAsString(), ioe);
+        }
+        LOG.warn("Exception when retrieving regioninfo from: " + region.getRegionNameAsString(),
+          ioe);
       }
       Thread.sleep(1000);
     }
-    throw new IOException("Region " + region + " failed to close within"
-        + " timeout " + timeout);
+    throw new IOException("Region " + region + " failed to close within" + " timeout " + timeout);
   }
 
   /**
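
The wait loop above is a plain deadline poll: probe the region, sleep a second, give up once the timeout elapses. Distilled into a generic helper for reference, where BooleanSupplier stands in for the getRegionInfo probe; this is a sketch, not the committed code.

import java.io.IOException;
import java.util.function.BooleanSupplier;

public final class WaitUntilClosed {

  // Retry the check once a second until it reports closed or the deadline passes.
  public static void waitFor(BooleanSupplier isClosed, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (isClosed.getAsBoolean()) {
        return; // region reported closed, we are done
      }
      Thread.sleep(1000); // same 1s poll interval as the code above
    }
    throw new IOException("Region failed to close within timeout " + timeoutMs);
  }
}
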
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index ec7f717..121d06c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -31,7 +31,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -143,16 +146,17 @@ public class HBaseFsckRepair {
   }
 
   /**
-   * Contacts a region server and waits up to hbase.hbck.close.timeout ms
-   * (default 120s) to close the region.  This bypasses the active hmaster.
+   * Contacts a region server and waits up to hbase.hbck.close.timeout ms (default 120s) to close
+   * the region. This bypasses the active hmaster.
    */
-  @SuppressWarnings("deprecation")
-  public static void closeRegionSilentlyAndWait(Connection connection,
-      ServerName server, RegionInfo region) throws IOException, InterruptedException {
-    long timeout = connection.getConfiguration()
-      .getLong("hbase.hbck.close.timeout", 120000);
-    ServerManager.closeRegionSilentlyAndWait((ClusterConnection)connection, server,
-         region, timeout);
+  public static void closeRegionSilentlyAndWait(Connection connection, ServerName server,
+      RegionInfo region) throws IOException, InterruptedException {
+    long timeout = connection.getConfiguration().getLong("hbase.hbck.close.timeout", 120000);
+    // this is a bit ugly but it is only used in the old hbck and tests, so I think it is fine.
+    try (AsyncClusterConnection asyncConn = ClusterConnectionFactory
+      .createAsyncClusterConnection(connection.getConfiguration(), null, User.getCurrent())) {
+      ServerManager.closeRegionSilentlyAndWait(asyncConn, server, region, timeout);
+    }
   }
 
   /**