Posted to commits@hbase.apache.org by zh...@apache.org on 2019/05/23 14:30:59 UTC

[hbase] branch HBASE-21512 updated (50d1a70 -> 5db9b81)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a change to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git.


 discard 50d1a70  HBASE-22037 Re-enable TestAvoidCellReferencesIntoShippedBlocks
 discard 365b9ec  HBASE-22351 Increase the wait time when creating table for TestProcedurePriority
 discard cc7c1b9  HBASE-22328 NPE in RegionReplicaReplicationEndpoint
 discard ed1d595  HBASE-22036 Rewrite TestScannerHeartbeatMessages
 discard 73bd0ce  HBASE-22239 Also catch RemoteException in SyncReplicationTestBase.verifyReplicationRequestRejection
 discard 86bcbab  HBASE-22302 Fix TestHbck
 discard f666544  HBASE-22297 Fix TestRegionMergeTransitionOnCluster and TestSplitTransactionOnCluster
 discard 30c4610  HBASE-22295 Fix TestClientOperationTimeout
 discard 51c545d  HBASE-22281 Fix failed shell UTs
 discard f3a592f  HBASE-22223 Implement RegionLocator based on AsyncTableRegionLocator
 discard 647c606  HBASE-22238 Fix TestRpcControllerFactory
 discard e1edfd0  HBASE-21725 Implement BufferedMutator Based on AsyncBufferedMutator
 discard 67166d1  HBASE-22241 Fix TestRegionServerCoprocessorEndpoint
 discard e3e2dc6  HBASE-21718 Implement Admin based on AsyncAdmin
 discard 314b3ca  HBASE-21717 Implement Connection based on AsyncConnection
 discard 36b5c4b  HBASE-21585 Remove ClusterConnection
 discard ebd6a70  HBASE-21779 Reimplement BulkLoadHFilesTool to use AsyncClusterConnection
 discard fc54a72  HBASE-21778 Remove the usage of the locateRegion related methods in ClusterConnection
 discard e96682f  HBASE-21719 Rewrite RegionPlacementMaintainer to use AsyncClusterConnection
 discard fcdeb1a  HBASE-21537 Rewrite ServerManager.closeRegionSilentlyAndWait to use AsyncClusterConnection
 discard 39ef27f  HBASE-21671 Rewrite RegionReplicaReplicationEndpoint to use AsyncClusterConnection
 discard 3ec40ae  HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection
 discard a8acde7  HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint
 discard fec7059  HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin
 discard 36211e1  HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager
 discard c27fe90  HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer
     add 2bf7ad4  HBASE-22447 Check refCount before free block in BucketCache
     add 8e47c8e  HBASE-22445 Add file info when throw exceptions in HFileReaderImpl
     add f1a8aa4  HBASE-22456 Polish TestSplitTransitionOnCluster
     add f773043  HBASE-22440. Override getWalGroupsReplicationStatus to avoid NPE
     add 6b899cc  HBASE-22462 Should run a 'mvn install' at the end of hadoop check in pre commit job
     new 3d7c2bc  HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer
     new 2f406e0  HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager
     new d4e1074  HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin
     new e5b82e2  HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint
     new 2a94270  HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection
     new 0e29c9d  HBASE-21671 Rewrite RegionReplicaReplicationEndpoint to use AsyncClusterConnection
     new 1453314  HBASE-21537 Rewrite ServerManager.closeRegionSilentlyAndWait to use AsyncClusterConnection
     new 3daf1ac  HBASE-21719 Rewrite RegionPlacementMaintainer to use AsyncClusterConnection
     new 68a23dd  HBASE-21778 Remove the usage of the locateRegion related methods in ClusterConnection
     new cba47b4  HBASE-21779 Reimplement BulkLoadHFilesTool to use AsyncClusterConnection
     new 51148aa  HBASE-21585 Remove ClusterConnection
     new b77a198  HBASE-21717 Implement Connection based on AsyncConnection
     new 9df28d8  HBASE-21718 Implement Admin based on AsyncAdmin
     new 060f98f  HBASE-22241 Fix TestRegionServerCoprocessorEndpoint
     new cc3cfbc  HBASE-21725 Implement BufferedMutator Based on AsyncBufferedMutator
     new 0d1ec0c  HBASE-22238 Fix TestRpcControllerFactory
     new a21168a  HBASE-22223 Implement RegionLocator based on AsyncTableRegionLocator
     new 9de0cd8  HBASE-22281 Fix failed shell UTs
     new 29fb885  HBASE-22295 Fix TestClientOperationTimeout
     new d43461a  HBASE-22297 Fix TestRegionMergeTransitionOnCluster and TestSplitTransactionOnCluster
     new 82a1ea9  HBASE-22302 Fix TestHbck
     new 417c8d1  HBASE-22239 Also catch RemoteException in SyncReplicationTestBase.verifyReplicationRequestRejection
     new d0b3a67  HBASE-22036 Rewrite TestScannerHeartbeatMessages
     new 3083d56  HBASE-22328 NPE in RegionReplicaReplicationEndpoint
     new 68a59ba  HBASE-22351 Increase the wait time when creating table for TestProcedurePriority
     new e71aa8b  HBASE-22037 Re-enable TestAvoidCellReferencesIntoShippedBlocks
     new 5db9b81  HBASE-22351 Addendum limit the concurrency when creating table

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (50d1a70)
            \
             N -- N -- N   refs/heads/HBASE-21512 (5db9b81)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 27 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 dev-support/hbase-personality.sh                   |   5 +
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java     |  69 ++++++++------
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  44 ++-------
 .../org/apache/hadoop/hbase/master/HMaster.java    |   9 ++
 .../master/procedure/TestProcedurePriority.java    |  14 ++-
 .../TestSplitTransactionOnCluster.java             | 104 +++++++--------------
 6 files changed, 106 insertions(+), 139 deletions(-)


[hbase] 12/27: HBASE-21717 Implement Connection based on AsyncConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit b77a198146e6238863c9f2cb3aec611206bc1d22
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Mar 7 11:51:51 2019 +0800

    HBASE-21717 Implement Connection based on AsyncConnection
---
 .../apache/hadoop/hbase/backup/TestBackupBase.java |   5 +-
 .../hadoop/hbase/backup/TestBackupMerge.java       |   8 +-
 .../hbase/backup/TestBackupMultipleDeletes.java    |  10 +-
 .../hadoop/hbase/backup/TestIncrementalBackup.java |  16 +-
 .../backup/TestIncrementalBackupDeleteTable.java   |  10 +-
 .../TestIncrementalBackupMergeWithFailures.java    |   7 +-
 .../backup/TestIncrementalBackupWithBulkLoad.java  |   6 +-
 .../backup/TestIncrementalBackupWithFailures.java  |   6 +-
 .../hadoop/hbase/backup/TestRemoteBackup.java      |   8 +-
 .../hbase/backup/master/TestBackupLogCleaner.java  |   6 +-
 .../hadoop/hbase/client/AsyncConnection.java       |   8 +
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |  49 +-
 .../hbase/client/AsyncTableResultScanner.java      |   4 +
 .../org/apache/hadoop/hbase/client/Connection.java |  17 +-
 .../hadoop/hbase/client/ConnectionFactory.java     |  44 +-
 .../hbase/client/ConnectionImplementation.java     |  53 +-
 .../client/ConnectionOverAsyncConnection.java      | 180 +++++++
 .../hadoop/hbase/client/ConnectionUtils.java       | 103 ++--
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |   5 +-
 .../org/apache/hadoop/hbase/client/HTable.java     | 124 +----
 .../client/RegionCoprocessorRpcChannelImpl.java    |  37 +-
 .../java/org/apache/hadoop/hbase/client/Scan.java  |   5 +
 .../java/org/apache/hadoop/hbase/client/Table.java | 446 +++++------------
 .../hadoop/hbase/client/TableOverAsyncTable.java   | 532 +++++++++++++++++++++
 .../hadoop/hbase/ipc/CoprocessorRpcChannel.java    |  12 +-
 .../apache/hadoop/hbase/client/SimpleRegistry.java |  83 ++++
 .../hadoop/hbase/client/TestAsyncProcess.java      |   2 +-
 .../hadoop/hbase/client/TestBufferedMutator.java   |  14 +-
 .../hadoop/hbase/client/TestClientNoCluster.java   |  33 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    |  31 +-
 .../mapreduce/TestMultiTableInputFormatBase.java   |   6 +
 .../hbase/mapreduce/TestTableInputFormatBase.java  |   6 +
 .../org/apache/hadoop/hbase/rest/ResourceBase.java |  16 +-
 .../apache/hadoop/hbase/rest/SchemaResource.java   |  18 +-
 .../hadoop/hbase/rest/client/RemoteHTable.java     | 246 +++-------
 .../hadoop/hbase/rest/TestScannerResource.java     |  10 +-
 .../hadoop/hbase/rest/client/TestRemoteTable.java  |   7 +-
 .../hbase/client/SharedAsyncConnection.java}       | 109 ++---
 .../hbase/{ => client}/SharedConnection.java       |  18 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |   5 +-
 .../hadoop/hbase/master/MasterCoprocessorHost.java |   2 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  68 +--
 .../hbase/regionserver/RegionCoprocessorHost.java  |   2 +-
 .../regionserver/RegionServerCoprocessorHost.java  |   2 +-
 .../hbase/security/access/AccessController.java    |  13 +-
 .../apache/hadoop/hbase/util/MultiHConnection.java | 141 ------
 .../main/resources/hbase-webapps/master/table.jsp  |   5 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |  95 ++--
 .../hbase/TestPartialResultsFromClientSide.java    |  23 +-
 .../TestServerSideScanMetricsFromClientSide.java   |  51 +-
 .../example/TestZooKeeperTableArchiveClient.java   |  19 +-
 .../client/AbstractTestCIOperationTimeout.java     |   4 +-
 .../hbase/client/AbstractTestCIRpcTimeout.java     |   2 +-
 .../hadoop/hbase/client/AbstractTestCITimeout.java |   2 +-
 .../hbase/client/DummyAsyncClusterConnection.java  |   5 +
 .../org/apache/hadoop/hbase/client/TestAdmin1.java |  24 +-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |  17 +-
 .../hbase/client/TestAlwaysSetScannerId.java       |  29 +-
 .../hbase/client/TestAsyncTableAdminApi.java       |   2 +-
 .../TestAvoidCellReferencesIntoShippedBlocks.java  |   7 +
 .../hadoop/hbase/client/TestCIBadHostname.java     |  28 +-
 .../apache/hadoop/hbase/client/TestCISleep.java    |  71 +--
 .../hadoop/hbase/client/TestCheckAndMutate.java    |  30 +-
 .../hadoop/hbase/client/TestClientPushback.java    |   5 +-
 .../hbase/client/TestConnectionImplementation.java |   3 +
 .../hadoop/hbase/client/TestFromClientSide.java    | 233 ++++-----
 .../hadoop/hbase/client/TestFromClientSide3.java   | 224 +++++----
 .../client/TestFromClientSideScanExcpetion.java    |   9 +-
 .../hbase/client/TestGetProcedureResult.java       |   7 +-
 .../hbase/client/TestIncrementsFromClientSide.java |  61 +--
 .../hadoop/hbase/client/TestLeaseRenewal.java      | 136 ------
 .../hbase/client/TestMalformedCellFromClient.java  |  36 +-
 .../apache/hadoop/hbase/client/TestMetaCache.java  |   5 +
 .../hadoop/hbase/client/TestMetaWithReplicas.java  |  11 +-
 .../client/TestMultiActionMetricsFromClient.java   |  13 +-
 .../hadoop/hbase/client/TestMultiParallel.java     | 108 ++---
 .../hbase/client/TestMultiRespectsLimits.java      |  21 +-
 .../hbase/client/TestRegionLocationCaching.java    |   5 +
 .../hbase/client/TestReplicaWithCluster.java       |   8 +-
 .../hadoop/hbase/client/TestReplicasClient.java    |  38 +-
 .../hbase/client/TestScanWithoutFetchingData.java  |  27 +-
 .../hbase/client/TestScannersFromClientSide.java   | 168 +------
 .../hbase/client/TestSeparateClientZKCluster.java  |  48 +-
 .../hbase/client/TestShortCircuitConnection.java   |  95 ----
 ...C.java => TestCoprocessorSharedConnection.java} |  23 +-
 .../TestPassCustomCellViaRegionObserver.java       |   5 +-
 .../hbase/filter/TestMultiRowRangeFilter.java      |  45 +-
 .../hadoop/hbase/master/TestMasterShutdown.java    |  21 +-
 .../hadoop/hbase/master/TestWarmupRegion.java      |   4 +-
 .../hadoop/hbase/regionserver/RegionAsTable.java   |  89 ----
 .../regionserver/TestEndToEndSplitTransaction.java |   2 +-
 .../hbase/regionserver/TestHRegionFileSystem.java  |  17 +-
 .../TestNewVersionBehaviorFromClientSide.java      |   7 +-
 .../regionserver/TestPerColumnFamilyFlush.java     |   7 -
 .../regionserver/TestRegionServerMetrics.java      |  91 ++--
 .../regionserver/TestScannerHeartbeatMessages.java |   5 +
 .../TestSettingTimeoutOnBlockingPoint.java         |  14 +-
 .../hbase/replication/TestReplicationBase.java     |   2 +-
 .../hbase/replication/TestReplicationWithTags.java |   2 +-
 .../TestGlobalReplicationThrottler.java            |   2 +-
 .../TestCoprocessorWhitelistMasterObserver.java    |   8 +-
 ...tVisibilityLabelReplicationWithExpAsString.java |   2 +-
 .../TestVisibilityLabelsReplication.java           |   2 +-
 .../hbase/snapshot/TestRegionSnapshotTask.java     |   2 +-
 .../apache/hadoop/hbase/tool/TestCanaryTool.java   |   3 +-
 .../hadoop/hbase/util/MultiThreadedAction.java     |   5 +-
 .../util/hbck/OfflineMetaRebuildTestCore.java      |   7 +-
 .../hbase/thrift/ThriftHBaseServiceHandler.java    |  12 +-
 .../hbase/thrift2/ThriftHBaseServiceHandler.java   |   2 +-
 .../hbase/thrift2/client/ThriftConnection.java     |   6 +
 .../hadoop/hbase/thrift2/client/ThriftTable.java   |   4 +-
 .../hadoop/hbase/thrift2/TestThriftConnection.java |   2 +-
 112 files changed, 2109 insertions(+), 2490 deletions(-)

diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 2afdb4f..e0fca20 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
@@ -354,9 +353,9 @@ public class TestBackupBase {
     TEST_UTIL.shutdownMiniMapReduceCluster();
   }
 
-  HTable insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows)
+  Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows)
       throws IOException {
-    HTable t = (HTable) conn.getTable(table);
+    Table t = conn.getTable(table);
     Put p1;
     for (int i = 0; i < numRows; i++) {
       p1 = new Put(Bytes.toBytes("row-" + table + "-" + id + "-" + i));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
index 9603c9d..beacef3 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.backup;
 import static org.junit.Assert.assertTrue;
 
 import java.util.List;
-
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
@@ -28,7 +27,6 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.Assert;
@@ -73,14 +71,14 @@ public class TestBackupMerge extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdFull));
 
     // #2 - insert some data to table1
-    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
     LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
 
     Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS);
     t1.close();
     LOG.debug("written " + ADD_ROWS + " rows to " + table1);
 
-    HTable t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
+    Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
 
     Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS);
     t2.close();
@@ -116,7 +114,7 @@ public class TestBackupMerge extends TestBackupBase {
       tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
     Table hTable = conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
+    LOG.debug("After incremental restore: " + hTable.getDescriptor());
     int countRows = TEST_UTIL.countRows(hTable, famName);
     LOG.debug("f1 has " + countRows + " rows");
     Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
index db1a4e2..bffa480 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Assert;
@@ -67,7 +67,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     String backupIdFull = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdFull));
     // #2 - insert some data to table table1
-    HTable t1 = (HTable) conn.getTable(table1);
+    Table t1 = conn.getTable(table1);
     Put p1;
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       p1 = new Put(Bytes.toBytes("row-t1" + i));
@@ -82,7 +82,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     String backupIdInc1 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc1));
     // #4 - insert some data to table table2
-    HTable t2 = (HTable) conn.getTable(table2);
+    Table t2 = conn.getTable(table2);
     Put p2 = null;
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       p2 = new Put(Bytes.toBytes("row-t2" + i));
@@ -95,7 +95,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     String backupIdInc2 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc2));
     // #6 - insert some data to table table1
-    t1 = (HTable) conn.getTable(table1);
+    t1 = conn.getTable(table1);
     for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
       p1 = new Put(Bytes.toBytes("row-t1" + i));
       p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
@@ -107,7 +107,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     String backupIdInc3 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc3));
     // #8 - insert some data to table table2
-    t2 = (HTable) conn.getTable(table2);
+    t2 = conn.getTable(table2);
     for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
       p2 = new Put(Bytes.toBytes("row-t1" + i));
       p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 749839c..35a77ea 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -101,7 +101,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       assertTrue(checkSucceeded(backupIdFull));
 
       // #2 - insert some data to table
-      HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+      Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
       LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
       Assert.assertEquals(HBaseTestingUtility.countRows(t1),
               NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
@@ -115,7 +115,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       Assert.assertEquals(HBaseTestingUtility.countRows(t1),
               NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
 
-      HTable t2 = (HTable) conn.getTable(table2);
+      Table t2 = conn.getTable(table2);
       Put p2;
       for (int i = 0; i < 5; i++) {
         p2 = new Put(Bytes.toBytes("row-t2" + i));
@@ -162,7 +162,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
 
       int NB_ROWS_FAM2 = 7;
-      HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
+      Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
       t3.close();
 
       // Wait for 5 sec to make sure that old WALs were deleted
@@ -188,11 +188,11 @@ public class TestIncrementalBackup extends TestBackupBase {
       hAdmin.close();
 
       // #6.2 - checking row count of tables for full restore
-      HTable hTable = (HTable) conn.getTable(table1_restore);
+      Table hTable = conn.getTable(table1_restore);
       Assert.assertEquals(HBaseTestingUtility.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
       hTable.close();
 
-      hTable = (HTable) conn.getTable(table2_restore);
+      hTable = conn.getTable(table2_restore);
       Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtility.countRows(hTable));
       hTable.close();
 
@@ -201,7 +201,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
       client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
               false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
-      hTable = (HTable) conn.getTable(table1_restore);
+      hTable = conn.getTable(table1_restore);
 
       LOG.debug("After incremental restore: " + hTable.getDescriptor());
       int countFamName = TEST_UTIL.countRows(hTable, famName);
@@ -217,7 +217,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       Assert.assertEquals(countMobName, NB_ROWS_MOB);
       hTable.close();
 
-      hTable = (HTable) conn.getTable(table2_restore);
+      hTable = conn.getTable(table2_restore);
       Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(hTable));
       hTable.close();
       admin.close();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index f8129d9..08834f2 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Assert;
@@ -75,7 +75,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdFull));
 
     // #2 - insert some data to table table1
-    HTable t1 = (HTable) conn.getTable(table1);
+    Table t1 = conn.getTable(table1);
     Put p1;
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       p1 = new Put(Bytes.toBytes("row-t1" + i));
@@ -110,11 +110,11 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     assertTrue(hAdmin.tableExists(table2_restore));
 
     // #5.2 - checking row count of tables for full restore
-    HTable hTable = (HTable) conn.getTable(table1_restore);
+    Table hTable = conn.getTable(table1_restore);
     Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
     hTable.close();
 
-    hTable = (HTable) conn.getTable(table2_restore);
+    hTable = conn.getTable(table2_restore);
     Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
     hTable.close();
 
@@ -124,7 +124,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple,
       false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
-    hTable = (HTable) conn.getTable(table1_restore);
+    hTable = conn.getTable(table1_restore);
     Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2);
     hTable.close();
     admin.close();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 57bdc46..7351258 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Pair;
@@ -245,14 +244,14 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdFull));
 
     // #2 - insert some data to table1
-    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
     LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
 
     Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS);
     t1.close();
     LOG.debug("written " + ADD_ROWS + " rows to " + table1);
 
-    HTable t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
+    Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
 
     Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS);
     t2.close();
@@ -334,7 +333,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
       tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
     Table hTable = conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
+    LOG.debug("After incremental restore: " + hTable.getDescriptor());
     LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
     Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index 82f0fb7..4b02077 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.tool.TestBulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -79,7 +79,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdFull));
 
     // #2 - insert some data to table table1
-    HTable t1 = (HTable) conn.getTable(table1);
+    Table t1 = conn.getTable(table1);
     Put p1;
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       p1 = new Put(Bytes.toBytes("row-t1" + i));
@@ -127,7 +127,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1,
       false, tablesRestoreIncMultiple, tablesRestoreIncMultiple, true));
 
-    HTable hTable = (HTable) conn.getTable(table1);
+    Table hTable = conn.getTable(table1);
     Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1);
     request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
index d5829b2..f6725d9 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ToolRunner;
@@ -100,14 +100,14 @@ public class TestIncrementalBackupWithFailures extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdFull));
 
     // #2 - insert some data to table
-    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
     LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
 
     Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
     t1.close();
     LOG.debug("written " + ADD_ROWS + " rows to " + table1);
 
-    HTable t2 = (HTable) conn.getTable(table2);
+    Table t2 = conn.getTable(table2);
     Put p2;
     for (int i = 0; i < 5; i++) {
       p2 = new Put(Bytes.toBytes("row-t2" + i));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
index a0226e6..05826e2 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -78,7 +78,7 @@ public class TestRemoteBackup extends TestBackupBase {
       } catch (InterruptedException ie) {
       }
       try {
-        HTable t1 = (HTable) conn.getTable(table1);
+        Table t1 = conn.getTable(table1);
         Put p1;
         for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
           p1 = new Put(Bytes.toBytes("row-t1" + i));
@@ -102,7 +102,7 @@ public class TestRemoteBackup extends TestBackupBase {
     HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
 
     SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
-    HTable t1 = (HTable) conn.getTable(table1);
+    Table t1 = conn.getTable(table1);
     int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
 
     latch.countDown();
@@ -130,7 +130,7 @@ public class TestRemoteBackup extends TestBackupBase {
     assertTrue(hAdmin.tableExists(table1_restore));
 
     // #5.2 - checking row count of tables for full restore
-    HTable hTable = (HTable) conn.getTable(table1_restore);
+    Table hTable = conn.getTable(table1_restore);
     Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH);
     int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
     Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
index 9273487..6b8011e 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
@@ -38,8 +38,8 @@ import org.apache.hadoop.hbase.backup.TestBackupBase;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -107,7 +107,7 @@ public class TestBackupLogCleaner extends TestBackupBase {
       assertTrue(walFiles.size() < newWalFiles.size());
       Connection conn = ConnectionFactory.createConnection(conf1);
       // #2 - insert some data to table
-      HTable t1 = (HTable) conn.getTable(table1);
+      Table t1 = conn.getTable(table1);
       Put p1;
       for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
         p1 = new Put(Bytes.toBytes("row-t1" + i));
@@ -117,7 +117,7 @@ public class TestBackupLogCleaner extends TestBackupBase {
 
       t1.close();
 
-      HTable t2 = (HTable) conn.getTable(table2);
+      Table t2 = conn.getTable(table2);
       Put p2;
       for (int i = 0; i < 5; i++) {
         p2 = new Put(Bytes.toBytes("row-t2" + i));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
index 75971ad..0546520 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
@@ -202,6 +202,14 @@ public interface AsyncConnection extends Closeable {
   boolean isClosed();
 
   /**
+   * Convert this connection to a {@link Connection}.
+   * <p/>
+   * Usually we will return the same instance if you call this method multiple times so you can
+   * consider this as a light-weighted operation.
+   */
+  Connection toConnection();
+
+  /**
    * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to
    * be thread-safe. A new instance should be created by each thread. This is a lightweight
    * operation. Pooling or caching of the returned Hbck instance is not recommended.
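
The toConnection() method added above is the bridge from the async client back to the
synchronous API. A minimal usage sketch under that assumption (the table name and row key
are placeholders, not part of this patch):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ToConnectionExample {
  public static void main(String[] args) throws Exception {
    // Build the async connection first, then view it through the synchronous API.
    try (AsyncConnection asyncConn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      // Documented above as light-weight; repeated calls should hand back the same wrapper.
      Connection conn = asyncConn.toConnection();
      try (Table table = conn.getTable(TableName.valueOf("test_table"))) {
        table.get(new Get(Bytes.toBytes("row1")));
      }
      // Closing the AsyncConnection also shuts the wrapper down (see
      // AsyncConnectionImpl.close() in the next hunk).
    }
  }
}
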
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index d3d50d7..4a00412 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -28,6 +28,7 @@ import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLE
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.net.SocketAddress;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
@@ -119,6 +120,8 @@ class AsyncConnectionImpl implements AsyncConnection {
 
   private final ClusterStatusListener clusterStatusListener;
 
+  private volatile ConnectionOverAsyncConnection conn;
+
   public AsyncConnectionImpl(Configuration conf, AsyncRegistry registry, String clusterId,
       SocketAddress localAddress, User user) {
     this.conf = conf;
@@ -185,6 +188,11 @@ class AsyncConnectionImpl implements AsyncConnection {
   }
 
   @Override
+  public boolean isClosed() {
+    return closed;
+  }
+
+  @Override
   public void close() {
     // As the code below is safe to be executed in parallel, here we do not use CAS or lock, just a
     // simple volatile flag.
@@ -198,17 +206,21 @@ class AsyncConnectionImpl implements AsyncConnection {
       authService.shutdown();
     }
     metrics.ifPresent(MetricsConnection::shutdown);
+    ConnectionOverAsyncConnection c = this.conn;
+    if (c != null) {
+      c.closeConnImpl();
+    }
     closed = true;
   }
 
   @Override
-  public boolean isClosed() {
-    return closed;
+  public AsyncTableRegionLocator getRegionLocator(TableName tableName) {
+    return new AsyncTableRegionLocatorImpl(tableName, this);
   }
 
   @Override
-  public AsyncTableRegionLocator getRegionLocator(TableName tableName) {
-    return new AsyncTableRegionLocatorImpl(tableName, this);
+  public void clearRegionLocationCache() {
+    locator.clearCache();
   }
 
   // we will override this method for testing retry caller, so do not remove this method.
@@ -340,6 +352,30 @@ class AsyncConnectionImpl implements AsyncConnection {
   }
 
   @Override
+  public Connection toConnection() {
+    ConnectionOverAsyncConnection c = this.conn;
+    if (c != null) {
+      return c;
+    }
+    synchronized (this) {
+      c = this.conn;
+      if (c != null) {
+        return c;
+      }
+      try {
+        c = new ConnectionOverAsyncConnection(this,
+          ConnectionFactory.createConnectionImpl(conf, null, user));
+      } catch (IOException e) {
+        // TODO: finally we will not rely on ConnectionImplementation anymore and there will no
+        // IOException here.
+        throw new UncheckedIOException(e);
+      }
+      this.conn = c;
+    }
+    return c;
+  }
+
+  @Override
   public CompletableFuture<Hbck> getHbck() {
     CompletableFuture<Hbck> future = new CompletableFuture<>();
     addListener(registry.getMasterAddress(), (sn, error) -> {
@@ -365,11 +401,6 @@ class AsyncConnectionImpl implements AsyncConnection {
       rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout)), rpcControllerFactory);
   }
 
-  @Override
-  public void clearRegionLocationCache() {
-    locator.clearCache();
-  }
-
   Optional<MetricsConnection> getConnectionMetrics() {
     return metrics;
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java
index 9b97e93..cd5d5ad 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java
@@ -187,4 +187,8 @@ class AsyncTableResultScanner implements ResultScanner, AdvancedScanResultConsum
   public ScanMetrics getScanMetrics() {
     return scanMetrics;
   }
+
+  int getCacheSize() {
+    return queue.size();
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index 90891f4..b88c40c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -124,7 +125,9 @@ public interface Connection extends Abortable, Closeable {
    *
    * @return a {@link BufferedMutator} for the supplied tableName.
    */
-  BufferedMutator getBufferedMutator(TableName tableName) throws IOException;
+  default BufferedMutator getBufferedMutator(TableName tableName) throws IOException {
+    return getBufferedMutator(new BufferedMutatorParams(tableName));
+  }
 
   /**
    * Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The
@@ -194,6 +197,14 @@ public interface Connection extends Abortable, Closeable {
   TableBuilder getTableBuilder(TableName tableName, ExecutorService pool);
 
   /**
+   * Convert this connection to an {@link AsyncConnection}.
+   * <p/>
+   * Usually we will return the same instance if you call this method multiple times so you can
+   * consider this as a light-weighted operation.
+   */
+  AsyncConnection toAsyncConnection();
+
+  /**
    * Retrieve an Hbck implementation to fix an HBase cluster.
    * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by
    * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance
@@ -207,7 +218,7 @@ public interface Connection extends Abortable, Closeable {
    */
   @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK)
   default Hbck getHbck() throws IOException {
-    throw new UnsupportedOperationException("Not implemented");
+    return FutureUtils.get(toAsyncConnection().getHbck());
   }
 
   /**
@@ -228,6 +239,6 @@ public interface Connection extends Abortable, Closeable {
    */
   @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK)
   default Hbck getHbck(ServerName masterServer) throws IOException {
-    throw new UnsupportedOperationException("Not implemented");
+    return toAsyncConnection().getHbck(masterServer);
   }
 }
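
Connection.toAsyncConnection() above is the reverse bridge, and the new default getHbck()
methods simply route through it via FutureUtils.get. A hedged sketch of that round trip,
assuming the default post-patch connection implementation (ConnectionOverAsyncConnection);
note that the legacy ConnectionImplementation still throws UnsupportedOperationException
from toAsyncConnection() in this commit:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ToAsyncConnectionExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      AsyncConnection asyncConn = conn.toAsyncConnection();
      // Issue an asynchronous get through the synchronous connection's async view.
      Result result = asyncConn.getTable(TableName.valueOf("test_table"))
          .get(new Get(Bytes.toBytes("row1")))
          .join();
      System.out.println("async get returned empty=" + result.isEmpty());
    }
  }
}
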
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index ceef356..b6d0161 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -211,29 +212,34 @@ public class ConnectionFactory {
    * @return Connection object for <code>conf</code>
    */
   public static Connection createConnection(Configuration conf, ExecutorService pool,
-    final User user) throws IOException {
-    String className = conf.get(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
-      ConnectionImplementation.class.getName());
-    Class<?> clazz;
-    try {
-      clazz = Class.forName(className);
-    } catch (ClassNotFoundException e) {
-      throw new IOException(e);
-    }
-    try {
-      // Default HCM#HCI is not accessible; make it so before invoking.
-      Constructor<?> constructor = clazz.getDeclaredConstructor(Configuration.class,
-        ExecutorService.class, User.class);
-      constructor.setAccessible(true);
-      return user.runAs(
-        (PrivilegedExceptionAction<Connection>)() ->
-          (Connection) constructor.newInstance(conf, pool, user));
-    } catch (Exception e) {
-      throw new IOException(e);
+      final User user) throws IOException {
+    Class<?> clazz = conf.getClass(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
+      ConnectionOverAsyncConnection.class, Connection.class);
+    if (clazz != ConnectionOverAsyncConnection.class) {
+      try {
+        // Default HCM#HCI is not accessible; make it so before invoking.
+        Constructor<?> constructor =
+          clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class);
+        constructor.setAccessible(true);
+        return user.runAs((PrivilegedExceptionAction<Connection>) () -> (Connection) constructor
+          .newInstance(conf, pool, user));
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    } else {
+      return FutureUtils.get(createAsyncConnection(conf, user)).toConnection();
     }
   }
 
   /**
+   * Create a {@link ConnectionImplementation}, internal use only.
+   */
+  static ConnectionImplementation createConnectionImpl(Configuration conf, ExecutorService pool,
+      User user) throws IOException {
+    return new ConnectionImplementation(conf, pool, user);
+  }
+
+  /**
    * Call {@link #createAsyncConnection(Configuration)} using default HBaseConfiguration.
    * @see #createAsyncConnection(Configuration)
    * @return AsyncConnection object wrapped by CompletableFuture
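
With the factory change above, the default createConnection(conf) path (no custom class set
via ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL) builds an AsyncConnection and wraps it.
A sketch of the rough equivalence, for illustration only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.FutureUtils;

public class FactoryEquivalenceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // What callers keep writing:
    Connection viaFactory = ConnectionFactory.createConnection(conf);
    // Roughly what the default branch above now does internally:
    Connection viaAsync =
        FutureUtils.get(ConnectionFactory.createAsyncConnection(conf)).toConnection();
    viaFactory.close();
    viaAsync.close();
  }
}
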
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index de377c7..edfc258 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -42,7 +42,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.ReentrantLock;
 import org.apache.hadoop.conf.Configuration;
@@ -76,7 +75,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -419,11 +417,6 @@ class ConnectionImplementation implements Connection, Closeable {
   }
 
   @Override
-  public BufferedMutator getBufferedMutator(TableName tableName) {
-    return getBufferedMutator(new BufferedMutatorParams(tableName));
-  }
-
-  @Override
   public RegionLocator getRegionLocator(TableName tableName) throws IOException {
     return new HRegionLocator(tableName, this);
   }
@@ -478,30 +471,8 @@ class ConnectionImplementation implements Connection, Closeable {
   private ThreadPoolExecutor getThreadPool(int maxThreads, int coreThreads, String nameHint,
       BlockingQueue<Runnable> passedWorkQueue) {
     // shared HTable thread executor not yet initialized
-    if (maxThreads == 0) {
-      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
-    }
-    if (coreThreads == 0) {
-      coreThreads = Runtime.getRuntime().availableProcessors() * 8;
-    }
-    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
-    BlockingQueue<Runnable> workQueue = passedWorkQueue;
-    if (workQueue == null) {
-      workQueue =
-        new LinkedBlockingQueue<>(maxThreads *
-            conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-                HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-      coreThreads = maxThreads;
-    }
-    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
-        coreThreads,
-        maxThreads,
-        keepAliveTime,
-        TimeUnit.SECONDS,
-        workQueue,
-        Threads.newDaemonThreadFactory(toString() + nameHint));
-    tpe.allowCoreThreadTimeOut(true);
-    return tpe;
+    return ConnectionUtils.getThreadPool(conf, maxThreads, coreThreads, () -> toString() + nameHint,
+      passedWorkQueue);
   }
 
   private ThreadPoolExecutor getMetaLookupPool() {
@@ -533,21 +504,10 @@ class ConnectionImplementation implements Connection, Closeable {
 
   private void shutdownPools() {
     if (this.cleanupPool && this.batchPool != null && !this.batchPool.isShutdown()) {
-      shutdownBatchPool(this.batchPool);
+      ConnectionUtils.shutdownPool(this.batchPool);
     }
     if (this.metaLookupPool != null && !this.metaLookupPool.isShutdown()) {
-      shutdownBatchPool(this.metaLookupPool);
-    }
-  }
-
-  private void shutdownBatchPool(ExecutorService pool) {
-    pool.shutdown();
-    try {
-      if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
-        pool.shutdownNow();
-      }
-    } catch (InterruptedException e) {
-      pool.shutdownNow();
+      ConnectionUtils.shutdownPool(this.metaLookupPool);
     }
   }
 
@@ -2217,4 +2177,9 @@ class ConnectionImplementation implements Connection, Closeable {
       throw new IOException(cause);
     }
   }
+
+  @Override
+  public AsyncConnection toAsyncConnection() {
+    throw new UnsupportedOperationException();
+  }
 }
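
The thread-pool construction and shutdown code removed above moves into shared helpers in
ConnectionUtils (getThreadPool and shutdownPool). The new ConnectionUtils hunk is not fully
shown in this message, so the following is a reconstruction from the removed lines; the
Supplier-based name parameter is inferred from the call site and should be treated as an
assumption:

  // Sketch of the shared helpers, as members of ConnectionUtils (imports as in that class).
  static ThreadPoolExecutor getThreadPool(Configuration conf, int maxThreads, int coreThreads,
      Supplier<String> threadName, BlockingQueue<Runnable> passedWorkQueue) {
    if (maxThreads == 0) {
      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
      coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
    BlockingQueue<Runnable> workQueue = passedWorkQueue;
    if (workQueue == null) {
      workQueue = new LinkedBlockingQueue<>(maxThreads * conf.getInt(
        HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
      coreThreads = maxThreads;
    }
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime,
      TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory(threadName.get()));
    tpe.allowCoreThreadTimeOut(true);
    return tpe;
  }

  static void shutdownPool(ExecutorService pool) {
    pool.shutdown();
    try {
      if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
        pool.shutdownNow();
      }
    } catch (InterruptedException e) {
      pool.shutdownNow();
    }
  }
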
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
new file mode 100644
index 0000000..61cc708
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.log.HBaseMarkers;
+import org.apache.hadoop.hbase.util.FutureUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
+/**
+ * The connection implementation based on {@link AsyncConnection}.
+ */
+@InterfaceAudience.Private
+class ConnectionOverAsyncConnection implements Connection {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ConnectionOverAsyncConnection.class);
+
+  private volatile boolean aborted = false;
+
+  private volatile ExecutorService batchPool = null;
+
+  protected final AsyncConnectionImpl conn;
+
+  /**
+   * @deprecated we can not implement all the related stuffs at once so keep it here for now, will
+   *             remove it after we implement all the stuffs, like Admin, RegionLocator, etc.
+   */
+  @Deprecated
+  private final ConnectionImplementation oldConn;
+
+  private final ConnectionConfiguration connConf;
+
+  ConnectionOverAsyncConnection(AsyncConnectionImpl conn, ConnectionImplementation oldConn) {
+    this.conn = conn;
+    this.oldConn = oldConn;
+    this.connConf = new ConnectionConfiguration(conn.getConfiguration());
+  }
+
+  @Override
+  public void abort(String why, Throwable error) {
+    if (error != null) {
+      LOG.error(HBaseMarkers.FATAL, why, error);
+    } else {
+      LOG.error(HBaseMarkers.FATAL, why);
+    }
+    aborted = true;
+    try {
+      Closeables.close(this, true);
+    } catch (IOException e) {
+      throw new AssertionError(e);
+    }
+  }
+
+  @Override
+  public boolean isAborted() {
+    return aborted;
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return conn.getConfiguration();
+  }
+
+  @Override
+  public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException {
+    return oldConn.getBufferedMutator(params);
+  }
+
+  @Override
+  public RegionLocator getRegionLocator(TableName tableName) throws IOException {
+    return oldConn.getRegionLocator(tableName);
+  }
+
+  @Override
+  public void clearRegionLocationCache() {
+    conn.clearRegionLocationCache();
+  }
+
+  @Override
+  public Admin getAdmin() throws IOException {
+    return oldConn.getAdmin();
+  }
+
+  @Override
+  public void close() throws IOException {
+    conn.close();
+  }
+
+  // will be called from AsyncConnection, to avoid infinite loop as in the above method we will call
+  // AsyncConnection.close.
+  void closeConnImpl() {
+    ExecutorService batchPool = this.batchPool;
+    if (batchPool != null) {
+      ConnectionUtils.shutdownPool(batchPool);
+      this.batchPool = null;
+    }
+  }
+
+  @Override
+  public boolean isClosed() {
+    return conn.isClosed();
+  }
+
+  private ExecutorService getBatchPool() {
+    if (batchPool == null) {
+      synchronized (this) {
+        if (batchPool == null) {
+          int threads = conn.getConfiguration().getInt("hbase.hconnection.threads.max", 256);
+          this.batchPool = ConnectionUtils.getThreadPool(conn.getConfiguration(), threads, threads,
+            () -> toString() + "-shared", null);
+        }
+      }
+    }
+    return this.batchPool;
+  }
+
+  @Override
+  public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) {
+    return new TableBuilderBase(tableName, connConf) {
+
+      @Override
+      public Table build() {
+        ExecutorService p = pool != null ? pool : getBatchPool();
+        return new TableOverAsyncTable(conn,
+          conn.getTableBuilder(tableName).setRpcTimeout(rpcTimeout, TimeUnit.MILLISECONDS)
+            .setReadRpcTimeout(readRpcTimeout, TimeUnit.MILLISECONDS)
+            .setWriteRpcTimeout(writeRpcTimeout, TimeUnit.MILLISECONDS)
+            .setOperationTimeout(operationTimeout, TimeUnit.MILLISECONDS).build(),
+          p);
+      }
+    };
+  }
+
+  @Override
+  public AsyncConnection toAsyncConnection() {
+    return conn;
+  }
+
+  @Override
+  public Hbck getHbck() throws IOException {
+    return FutureUtils.get(conn.getHbck());
+  }
+
+  @Override
+  public Hbck getHbck(ServerName masterServer) throws IOException {
+    return conn.getHbck(masterServer);
+  }
+
+  /**
+   * An identifier that will remain the same for a given connection.
+   */
+  @Override
+  public String toString() {
+    return "connection-over-async-connection-0x" + Integer.toHexString(hashCode());
+  }
+}
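
For illustration only (not part of the patch), a minimal sketch of the getTableBuilder path above:
per-table timeouts set on the builder are forwarded to the underlying AsyncTableBuilder and the
resulting Table is a TableOverAsyncTable. The table name and timeout values below are made up.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class TableBuilderSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          // Passing a null pool means the connection's lazily created batch pool is used.
          try (Table table = connection.getTableBuilder(TableName.valueOf("test"), null)
              .setOperationTimeout(60_000) // milliseconds
              .setReadRpcTimeout(10_000)
              .build()) {
            // use the table ...
          }
        }
      }
    }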
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index fe1dd3e..2fa30b5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -29,9 +29,12 @@ import java.net.UnknownHostException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
@@ -49,9 +52,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.DNS;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -66,11 +69,9 @@ import org.apache.hbase.thirdparty.io.netty.util.Timer;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
 
 /**
  * Utility used by client connections.
@@ -141,68 +142,6 @@ public final class ConnectionUtils {
   }
 
   /**
-   * A ClusterConnection that will short-circuit RPC making direct invocations against the localhost
-   * if the invocation target is 'this' server; save on network and protobuf invocations.
-   */
-  // TODO This has to still do PB marshalling/unmarshalling stuff. Check how/whether we can avoid.
-  @VisibleForTesting // Class is visible so can assert we are short-circuiting when expected.
-  public static class ShortCircuitingClusterConnection extends ConnectionImplementation {
-    private final ServerName serverName;
-    private final AdminService.BlockingInterface localHostAdmin;
-    private final ClientService.BlockingInterface localHostClient;
-
-    private ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User user,
-        ServerName serverName, AdminService.BlockingInterface admin,
-        ClientService.BlockingInterface client) throws IOException {
-      super(conf, pool, user);
-      this.serverName = serverName;
-      this.localHostAdmin = admin;
-      this.localHostClient = client;
-    }
-
-    @Override
-    public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
-      return serverName.equals(sn) ? this.localHostAdmin : super.getAdmin(sn);
-    }
-
-    @Override
-    public ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
-      return serverName.equals(sn) ? this.localHostClient : super.getClient(sn);
-    }
-
-    @Override
-    public MasterKeepAliveConnection getMaster() throws IOException {
-      if (this.localHostClient instanceof MasterService.BlockingInterface) {
-        return new ShortCircuitMasterConnection(
-          (MasterService.BlockingInterface) this.localHostClient);
-      }
-      return super.getMaster();
-    }
-  }
-
-  /**
-   * Creates a short-circuit connection that can bypass the RPC layer (serialization,
-   * deserialization, networking, etc..) when talking to a local server.
-   * @param conf the current configuration
-   * @param pool the thread pool to use for batch operations
-   * @param user the user the connection is for
-   * @param serverName the local server name
-   * @param admin the admin interface of the local server
-   * @param client the client interface of the local server
-   * @return an short-circuit connection.
-   * @throws IOException if IO failure occurred
-   */
-  public static ConnectionImplementation createShortCircuitConnection(final Configuration conf,
-      ExecutorService pool, User user, final ServerName serverName,
-      final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
-      throws IOException {
-    if (user == null) {
-      user = UserProvider.instantiate(conf).getCurrent();
-    }
-    return new ShortCircuitingClusterConnection(conf, pool, user, serverName, admin, client);
-  }
-
-  /**
    * Setup the connection class, so that it will not depend on master being online. Used for testing
    * @param conf configuration to set
    */
@@ -742,4 +681,38 @@ public final class ConnectionUtils {
     }
     return future;
   }
+
+  static ThreadPoolExecutor getThreadPool(Configuration conf, int maxThreads, int coreThreads,
+      Supplier<String> threadName, BlockingQueue<Runnable> passedWorkQueue) {
+    // shared HTable thread executor not yet initialized
+    if (maxThreads == 0) {
+      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
+    }
+    if (coreThreads == 0) {
+      coreThreads = Runtime.getRuntime().availableProcessors() * 8;
+    }
+    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
+    BlockingQueue<Runnable> workQueue = passedWorkQueue;
+    if (workQueue == null) {
+      workQueue =
+        new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
+          HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
+      coreThreads = maxThreads;
+    }
+    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime,
+      TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory(threadName.get()));
+    tpe.allowCoreThreadTimeOut(true);
+    return tpe;
+  }
+
+  static void shutdownPool(ExecutorService pool) {
+    pool.shutdown();
+    try {
+      if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
+        pool.shutdownNow();
+      }
+    } catch (InterruptedException e) {
+      pool.shutdownNow();
+    }
+  }
 }
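
For illustration only (not part of the patch), a stand-alone sketch of the pool behaviour that the
getThreadPool/shutdownPool helpers above set up: a bounded work queue sized maxThreads *
hbase.client.max.total.tasks (assumed default 100), core threads that may time out when idle, and
an orderly shutdown with a 10 second grace period. The real helper additionally uses daemon threads
via Threads.newDaemonThreadFactory; this sketch keeps the default thread factory to stay
self-contained.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolSketch {
      public static void main(String[] args) throws InterruptedException {
        int maxThreads = Runtime.getRuntime().availableProcessors() * 8;
        LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(maxThreads * 100);
        ThreadPoolExecutor pool =
            new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS, queue);
        pool.allowCoreThreadTimeOut(true); // idle core threads exit after the keep-alive time
        pool.execute(() -> System.out.println("batch task"));
        // Mirrors shutdownPool: orderly shutdown first, force after a 10 second grace period.
        pool.shutdown();
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
          pool.shutdownNow();
        }
      }
    }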
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 55b83ee..c466e61 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
@@ -2028,8 +2029,8 @@ public class HBaseAdmin implements Admin {
 
     // Check ZK first.
     // If the connection exists, we may have a connection to ZK that does not work anymore
-    try (ConnectionImplementation connection =
-      (ConnectionImplementation) ConnectionFactory.createConnection(copyOfConf)) {
+    try (ConnectionImplementation connection = ConnectionFactory.createConnectionImpl(copyOfConf,
+      null, UserProvider.instantiate(copyOfConf).getCurrent())) {
       // can throw MasterNotRunningException
       connection.isMasterRunning();
     }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index ee6247b..11ec3bb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -99,7 +99,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType
  */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
-public class HTable implements Table {
+class HTable implements Table {
   private static final Logger LOG = LoggerFactory.getLogger(HTable.class);
   private static final Consistency DEFAULT_CONSISTENCY = Consistency.STRONG;
   private final ConnectionImplementation connection;
@@ -654,22 +654,6 @@ public class HTable implements Table {
         callWithRetries(callable, this.operationTimeoutMs);
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier,
-      final byte [] value, final Put put) throws IOException {
-    return doCheckAndPut(row, family, qualifier, CompareOperator.EQUAL.name(), value, null, put);
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier,
-      final CompareOperator op, final byte [] value, final Put put) throws IOException {
-    // The name of the operators in CompareOperator are intentionally those of the
-    // operators in the filter's CompareOp enum.
-    return doCheckAndPut(row, family, qualifier, op.name(), value, null, put);
-  }
-
   private boolean doCheckAndPut(final byte[] row, final byte[] family, final byte[] qualifier,
     final String opName, final byte[] value, final TimeRange timeRange, final Put put)
     throws IOException {
@@ -690,21 +674,6 @@ public class HTable implements Table {
         .callWithRetries(callable, this.operationTimeoutMs);
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
-    final byte[] value, final Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, CompareOperator.EQUAL.name(), value, null,
-      delete);
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
-    final CompareOperator op, final byte[] value, final Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, op.name(), value, null, delete);
-  }
-
   private boolean doCheckAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
     final String opName, final byte[] value, final TimeRange timeRange, final Delete delete)
     throws IOException {
@@ -797,13 +766,6 @@ public class HTable implements Table {
   }
 
   @Override
-  @Deprecated
-  public boolean checkAndMutate(final byte [] row, final byte [] family, final byte [] qualifier,
-      final CompareOperator op, final byte [] value, final RowMutations rm) throws IOException {
-    return doCheckAndMutate(row, family, qualifier, op.name(), value, null, rm);
-  }
-
-  @Override
   public boolean exists(final Get get) throws IOException {
     Result r = get(get, true);
     assert r.getExists() != null;
@@ -905,23 +867,6 @@ public class HTable implements Table {
   }
 
   @Override
-  public <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
-      byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
-      throws ServiceException, Throwable {
-    final Map<byte[],R> results =  Collections.synchronizedMap(
-        new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
-    coprocessorService(service, startKey, endKey, callable, new Batch.Callback<R>() {
-      @Override
-      public void update(byte[] region, byte[] row, R value) {
-        if (region != null) {
-          results.put(region, value);
-        }
-      }
-    });
-    return results;
-  }
-
-  @Override
   public <T extends Service, R> void coprocessorService(final Class<T> service,
       byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
       final Batch.Callback<R> callback) throws ServiceException, Throwable {
@@ -977,93 +922,26 @@ public class HTable implements Table {
   }
 
   @Override
-  @Deprecated
-  public int getRpcTimeout() {
-    return rpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setRpcTimeout(int rpcTimeout) {
-    setReadRpcTimeout(rpcTimeout);
-    setWriteRpcTimeout(rpcTimeout);
-  }
-
-  @Override
   public long getReadRpcTimeout(TimeUnit unit) {
     return unit.convert(readRpcTimeoutMs, TimeUnit.MILLISECONDS);
   }
 
   @Override
-  @Deprecated
-  public int getReadRpcTimeout() {
-    return readRpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setReadRpcTimeout(int readRpcTimeout) {
-    this.readRpcTimeoutMs = readRpcTimeout;
-  }
-
-  @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     return unit.convert(writeRpcTimeoutMs, TimeUnit.MILLISECONDS);
   }
 
   @Override
-  @Deprecated
-  public int getWriteRpcTimeout() {
-    return writeRpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setWriteRpcTimeout(int writeRpcTimeout) {
-    this.writeRpcTimeoutMs = writeRpcTimeout;
-  }
-
-  @Override
   public long getOperationTimeout(TimeUnit unit) {
     return unit.convert(operationTimeoutMs, TimeUnit.MILLISECONDS);
   }
 
   @Override
-  @Deprecated
-  public int getOperationTimeout() {
-    return operationTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setOperationTimeout(int operationTimeout) {
-    this.operationTimeoutMs = operationTimeout;
-  }
-
-  @Override
   public String toString() {
     return tableName + ";" + connection;
   }
 
   @Override
-  public <R extends Message> Map<byte[], R> batchCoprocessorService(
-      Descriptors.MethodDescriptor methodDescriptor, Message request,
-      byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
-    final Map<byte[], R> results = Collections.synchronizedMap(new TreeMap<byte[], R>(
-        Bytes.BYTES_COMPARATOR));
-    batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype,
-        new Callback<R>() {
-      @Override
-      public void update(byte[] region, byte[] row, R result) {
-        if (region != null) {
-          results.put(region, result);
-        }
-      }
-    });
-    return results;
-  }
-
-  @Override
   public <R extends Message> void batchCoprocessorService(
       final Descriptors.MethodDescriptor methodDescriptor, final Message request,
       byte[] startKey, byte[] endKey, final R responsePrototype, final Callback<R> callback)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java
index 94e7d9a..3c25c57 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -57,6 +58,8 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
 
   private final long operationTimeoutNs;
 
+  private byte[] lastRegion;
+
   RegionCoprocessorRpcChannelImpl(AsyncConnectionImpl conn, TableName tableName, RegionInfo region,
       byte[] row, long rpcTimeoutNs, long operationTimeoutNs) {
     this.conn = conn;
@@ -71,15 +74,13 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
       Message responsePrototype, HBaseRpcController controller, HRegionLocation loc,
       ClientService.Interface stub) {
     CompletableFuture<Message> future = new CompletableFuture<>();
-    if (region != null
-        && !Bytes.equals(loc.getRegionInfo().getRegionName(), region.getRegionName())) {
-      future.completeExceptionally(new DoNotRetryIOException(
-          "Region name is changed, expected " + region.getRegionNameAsString() + ", actual "
-              + loc.getRegionInfo().getRegionNameAsString()));
+    if (region != null && !Bytes.equals(loc.getRegion().getRegionName(), region.getRegionName())) {
+      future.completeExceptionally(new DoNotRetryIOException("Region name is changed, expected " +
+        region.getRegionNameAsString() + ", actual " + loc.getRegion().getRegionNameAsString()));
       return future;
     }
     CoprocessorServiceRequest csr = CoprocessorRpcUtils.getCoprocessorServiceRequest(method,
-      request, row, loc.getRegionInfo().getRegionName());
+      request, row, loc.getRegion().getRegionName());
     stub.execService(controller, csr,
       new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback<CoprocessorServiceResponse>() {
 
@@ -88,6 +89,7 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
           if (controller.failed()) {
             future.completeExceptionally(controller.getFailed());
           } else {
+            lastRegion = resp.getRegion().getValue().toByteArray();
             try {
               future.complete(CoprocessorRpcUtils.getResponse(resp, responsePrototype));
             } catch (IOException e) {
@@ -99,6 +101,23 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
     return future;
   }
 
+  protected final void setError(RpcController controller, Throwable error) {
+    if (controller == null) {
+      return;
+    }
+    if (controller instanceof ServerRpcController) {
+      if (error instanceof IOException) {
+        ((ServerRpcController) controller).setFailedOn((IOException) error);
+      } else {
+        ((ServerRpcController) controller).setFailedOn(new IOException(error));
+      }
+    } else if (controller instanceof ClientCoprocessorRpcController) {
+      ((ClientCoprocessorRpcController) controller).setFailed(error);
+    } else {
+      controller.setFailed(error.toString());
+    }
+  }
+
   @Override
   public void callMethod(MethodDescriptor method, RpcController controller, Message request,
       Message responsePrototype, RpcCallback<Message> done) {
@@ -109,9 +128,13 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
         .action((c, l, s) -> rpcCall(method, request, responsePrototype, c, l, s)).call(),
       (r, e) -> {
         if (e != null) {
-          ((ClientCoprocessorRpcController) controller).setFailed(e);
+          setError(controller, e);
         }
         done.run(r);
       });
   }
+
+  public byte[] getLastRegion() {
+    return lastRegion;
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 23bb5ce..f73c3e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -1113,6 +1113,11 @@ public class Scan extends Query {
     return asyncPrefetch;
   }
 
+  /**
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. Now that the sync client is built on the
+   *             async client, prefetching is always asynchronous, so this flag is useless.
+   */
+  @Deprecated
   public Scan setAsyncPrefetch(boolean asyncPrefetch) {
     this.asyncPrefetch = asyncPrefetch;
     return this;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 9268b13..def9774 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -27,14 +27,15 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -67,23 +68,6 @@ public interface Table extends Closeable {
   Configuration getConfiguration();
 
   /**
-   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
-   * @throws java.io.IOException if a remote or network exception occurs.
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
-   *             use {@link #getDescriptor()}
-   */
-  @Deprecated
-  default HTableDescriptor getTableDescriptor() throws IOException {
-    TableDescriptor descriptor = getDescriptor();
-
-    if (descriptor instanceof HTableDescriptor) {
-      return (HTableDescriptor)descriptor;
-    } else {
-      return new HTableDescriptor(descriptor);
-    }
-  }
-
-  /**
    * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table.
    * @throws java.io.IOException if a remote or network exception occurs.
    */
@@ -132,24 +116,6 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Test for the existence of columns in the table, as specified by the Gets.
-   * This will return an array of booleans. Each value will be true if the related Get matches
-   * one or more keys, false if not.
-   * This is a server-side call so it prevents any data from being transferred to
-   * the client.
-   *
-   * @param gets the Gets
-   * @return Array of boolean.  True if the specified Get matches one or more keys, false if not.
-   * @throws IOException e
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
-   *             use {@link #exists(List)}
-   */
-  @Deprecated
-  default boolean[] existsAll(List<Get> gets) throws IOException {
-    return exists(gets);
-  }
-
-  /**
    * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
    * The ordering of execution of the actions is not defined. Meaning if you do a Put and a
    * Get in the same {@link #batch} call, you will not necessarily be
@@ -171,10 +137,15 @@ public interface Table extends Closeable {
   /**
    * Same as {@link #batch(List, Object[])}, but with a callback.
    * @since 0.96.0
+   * @deprecated since 3.0.0, will be removed in 4.0.0. Please use the batch related methods in
+   *             {@link AsyncTable} directly if you want to use a callback. We reuse the callback
+   *             for coprocessors here, and the problem is that for batch operations
+   *             {@link AsyncTable} does not tell us the region, so this method needs an extra
+   *             region lookup after we get the result, which is not good.
    */
-  default <R> void batchCallback(
-    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback)
-      throws IOException, InterruptedException {
+  @Deprecated
+  default <R> void batchCallback(final List<? extends Row> actions, final Object[] results,
+      final Batch.Callback<R> callback) throws IOException, InterruptedException {
     throw new NotImplementedException("Add an implementation!");
   }
 
@@ -285,55 +256,6 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the put.  If the passed value is null, the check
-   * is for the lack of column (ie: non-existance)
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param value the expected value
-   * @param put data to put if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
-      throws IOException {
-    return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, put);
-  }
-
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the put.  If the passed value is null, the check
-   * is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> add the put.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op comparison operator to use
-   * @param value the expected value
-   * @param put data to put if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
-      byte[] value, Put put) throws IOException {
-    RowMutations mutations = new RowMutations(put.getRow(), 1);
-    mutations.add(put);
-
-    return checkAndMutate(row, family, qualifier, op, value, mutations);
-  }
-
-  /**
    * Deletes the specified cells/row.
    *
    * @param delete The object that specifies what to delete.
@@ -372,55 +294,6 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the delete.  If the passed value is null, the
-   * check is for the lack of column (ie: non-existance)
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param value the expected value
-   * @param delete data to delete if check succeeds
-   * @throws IOException e
-   * @return true if the new delete was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-    byte[] value, Delete delete) throws IOException {
-    return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, value, delete);
-  }
-
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the delete.  If the passed value is null, the
-   * check is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> add the delete.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op comparison operator to use
-   * @param value the expected value
-   * @param delete data to delete if check succeeds
-   * @throws IOException e
-   * @return true if the new delete was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-                         CompareOperator op, byte[] value, Delete delete) throws IOException {
-    RowMutations mutations = new RowMutations(delete.getRow(), 1);
-    mutations.add(delete);
-
-    return checkAndMutate(row, family, qualifier, op, value, mutations);
-  }
-
-  /**
    * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
    * adds the Put/Delete/RowMutations.
    * <p>
@@ -587,32 +460,35 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
-   * table region containing the specified row.  The row given does not actually have
-   * to exist.  Whichever region would contain the row based on start and end keys will
-   * be used.  Note that the {@code row} parameter is also not passed to the
-   * coprocessor handler registered for this protocol, unless the {@code row}
-   * is separately passed as an argument in the service request.  The parameter
-   * here is only used to locate the region used to handle the call.
-   *
+   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the table
+   * region containing the specified row. The row given does not actually have to exist. Whichever
+   * region would contain the row based on start and end keys will be used. Note that the
+   * {@code row} parameter is also not passed to the coprocessor handler registered for this
+   * protocol, unless the {@code row} is separately passed as an argument in the service request.
+   * The parameter here is only used to locate the region used to handle the call.
    * <p>
    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
    * </p>
+   * <div style="background-color: #cccccc; padding: 2px"> <blockquote>
    *
-   * <div style="background-color: #cccccc; padding: 2px">
-   * <blockquote><pre>
+   * <pre>
    * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    * MyCallRequest request = MyCallRequest.newBuilder()
    *     ...
    *     .build();
    * MyCallResponse response = service.myCall(null, request);
-   * </pre></blockquote></div>
+   * </pre>
    *
+   * </blockquote></div>
    * @param row The row key used to identify the remote region location
    * @return A CoprocessorRpcChannel instance
+   * @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level; please stop using
+   *             it. Use the coprocessorService methods in {@link AsyncTable} instead.
+   * @see Connection#toAsyncConnection()
    */
+  @Deprecated
   default CoprocessorRpcChannel coprocessorService(byte[] row) {
     throw new NotImplementedException("Add an implementation!");
   }
@@ -622,25 +498,41 @@ public interface Table extends Closeable {
    * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
    * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
    * with each {@link com.google.protobuf.Service} instance.
-   *
    * @param service the protocol buffer {@code Service} implementation to call
-   * @param startKey start region selection with region containing this row.  If {@code null}, the
-   *   selection will start with the first table region.
+   * @param startKey start region selection with region containing this row. If {@code null}, the
+   *          selection will start with the first table region.
    * @param endKey select regions up to and including the region containing this row. If
-   *   {@code null}, selection will continue through the last table region.
+   *          {@code null}, selection will continue through the last table region.
    * @param callable this instance's
-   *   {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
-   *   method will be invoked once per table region, using the {@link com.google.protobuf.Service}
-   *   instance connected to that region.
+   *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be
+   *          invoked once per table region, using the {@link com.google.protobuf.Service} instance
+   *          connected to that region.
    * @param <T> the {@link com.google.protobuf.Service} subclass to connect to
-   * @param <R> Return type for the {@code callable} parameter's {@link
-   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+   * @param <R> Return type for the {@code callable} parameter's
+   *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
    * @return a map of result values keyed by region name
+   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
+   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
+   *             way, even though we are now building the {@link Table} implementation on top of
+   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
+   *             {@link AsyncTable} directly instead.
+   * @see Connection#toAsyncConnection()
    */
-  default <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
-    byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
-    throws ServiceException, Throwable {
-    throw new NotImplementedException("Add an implementation!");
+  @Deprecated
+  default <T extends Service, R> Map<byte[], R> coprocessorService(final Class<T> service,
+      byte[] startKey, byte[] endKey, final Batch.Call<T, R> callable)
+      throws ServiceException, Throwable {
+    Map<byte[], R> results =
+      Collections.synchronizedMap(new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
+    coprocessorService(service, startKey, endKey, callable, new Batch.Callback<R>() {
+      @Override
+      public void update(byte[] region, byte[] row, R value) {
+        if (region != null) {
+          results.put(region, value);
+        }
+      }
+    });
+    return results;
   }
 
   /**
@@ -648,28 +540,35 @@ public interface Table extends Closeable {
    * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
    * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
    * with each {@link Service} instance.
-   *
-   * <p> The given
+   * <p>
+   * The given
    * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
    * method will be called with the return value from each region's
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. </p>
-   *
+   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
+   * </p>
    * @param service the protocol buffer {@code Service} implementation to call
-   * @param startKey start region selection with region containing this row.  If {@code null}, the
-   *   selection will start with the first table region.
+   * @param startKey start region selection with region containing this row. If {@code null}, the
+   *          selection will start with the first table region.
    * @param endKey select regions up to and including the region containing this row. If
-   *   {@code null}, selection will continue through the last table region.
+   *          {@code null}, selection will continue through the last table region.
    * @param callable this instance's
-   *   {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
-   *   method will be invoked once per table region, using the {@link Service} instance connected to
-   *   that region.
+   *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be
+   *          invoked once per table region, using the {@link Service} instance connected to that
+   *          region.
    * @param <T> the {@link Service} subclass to connect to
-   * @param <R> Return type for the {@code callable} parameter's {@link
-   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+   * @param <R> Return type for the {@code callable} parameter's
+   *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
+   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
+   *             way, even though we are now building the {@link Table} implementation on top of
+   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
+   *             {@link AsyncTable} directly instead.
+   * @see Connection#toAsyncConnection()
    */
-  default <T extends Service, R> void coprocessorService(final Class<T> service,
-    byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
-    final Batch.Callback<R> callback) throws ServiceException, Throwable {
+  @Deprecated
+  default <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey,
+      byte[] endKey, final Batch.Call<T, R> callable, final Batch.Callback<R> callback)
+      throws ServiceException, Throwable {
     throw new NotImplementedException("Add an implementation!");
   }
 
@@ -678,27 +577,38 @@ public interface Table extends Closeable {
    * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
    * the invocations to the same region server will be batched into one call. The coprocessor
    * service is invoked according to the service instance, method name and parameters.
-   *
-   * @param methodDescriptor
-   *          the descriptor for the protobuf service method to call.
-   * @param request
-   *          the method call parameters
-   * @param startKey
-   *          start region selection with region containing this row. If {@code null}, the
+   * @param methodDescriptor the descriptor for the protobuf service method to call.
+   * @param request the method call parameters
+   * @param startKey start region selection with region containing this row. If {@code null}, the
    *          selection will start with the first table region.
-   * @param endKey
-   *          select regions up to and including the region containing this row. If {@code null},
-   *          selection will continue through the last table region.
-   * @param responsePrototype
-   *          the proto type of the response of the method in Service.
-   * @param <R>
-   *          the response type for the coprocessor Service method
+   * @param endKey select regions up to and including the region containing this row. If
+   *          {@code null}, selection will continue through the last table region.
+   * @param responsePrototype the proto type of the response of the method in Service.
+   * @param <R> the response type for the coprocessor Service method
    * @return a map of result values keyed by region name
+   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
+   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
+   *             way, even though we are now building the {@link Table} implementation on top of
+   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
+   *             {@link AsyncTable} directly instead.
+   * @see Connection#toAsyncConnection()
    */
+  @Deprecated
   default <R extends Message> Map<byte[], R> batchCoprocessorService(
-    Descriptors.MethodDescriptor methodDescriptor, Message request,
-    byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
-    throw new NotImplementedException("Add an implementation!");
+      Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey,
+      byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
+    final Map<byte[], R> results =
+      Collections.synchronizedMap(new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
+    batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype,
+      new Callback<R>() {
+        @Override
+        public void update(byte[] region, byte[] row, R result) {
+          if (region != null) {
+            results.put(region, result);
+          }
+        }
+      });
+    return results;
   }
 
   /**
@@ -706,24 +616,28 @@ public interface Table extends Closeable {
    * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
    * the invocations to the same region server will be batched into one call. The coprocessor
    * service is invoked according to the service instance, method name and parameters.
-   *
    * <p>
    * The given
    * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
    * method will be called with the return value from each region's invocation.
    * </p>
-   *
    * @param methodDescriptor the descriptor for the protobuf service method to call.
    * @param request the method call parameters
-   * @param startKey start region selection with region containing this row.
-   *   If {@code null}, the selection will start with the first table region.
-   * @param endKey select regions up to and including the region containing this row.
-   *   If {@code null}, selection will continue through the last table region.
+   * @param startKey start region selection with region containing this row. If {@code null}, the
+   *          selection will start with the first table region.
+   * @param endKey select regions up to and including the region containing this row. If
+   *          {@code null}, selection will continue through the last table region.
    * @param responsePrototype the proto type of the response of the method in Service.
    * @param callback callback to invoke with the response for each region
-   * @param <R>
-   *          the response type for the coprocessor Service method
+   * @param <R> the response type for the coprocessor Service method
+   * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
+   *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
+   *             way, even though we are now building the {@link Table} implementation on top of
+   *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
+   *             {@link AsyncTable} directly instead.
+   * @see Connection#toAsyncConnection()
    */
+  @Deprecated
   default <R extends Message> void batchCoprocessorService(
       Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey,
       byte[] endKey, R responsePrototype, Batch.Callback<R> callback)
@@ -732,32 +646,6 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Atomically checks if a row/family/qualifier value matches the expected value.
-   * If it does, it performs the row mutations.  If the passed value is null, the check
-   * is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> perform row mutations.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op the comparison operator
-   * @param value the expected value
-   * @param mutation  mutations to perform if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
-                         byte[] value, RowMutations mutation) throws IOException {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
-  /**
    * Get timeout of each rpc request in this Table instance. It will be overridden by a more
    * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
    * @see #getReadRpcTimeout(TimeUnit)
@@ -770,36 +658,6 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Get timeout (millisecond) of each rpc request in this Table instance.
-   *
-   * @return Currently configured read timeout
-   * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or
-   *             {@link #getWriteRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getRpcTimeout() {
-    return (int)getRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc request in operations of this Table instance, will
-   * override the value of hbase.rpc.timeout in configuration.
-   * If a rpc request waiting too long, it will stop waiting and send a new request to retry until
-   * retries exhausted or operation timeout reached.
-   * <p>
-   * NOTE: This will set both the read and write timeout settings to the provided value.
-   *
-   * @param rpcTimeout the timeout of each rpc request in millisecond.
-   *
-   * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
-   */
-  @Deprecated
-  default void setRpcTimeout(int rpcTimeout) {
-    setReadRpcTimeout(rpcTimeout);
-    setWriteRpcTimeout(rpcTimeout);
-  }
-
-  /**
    * Get timeout of each rpc read request in this Table instance.
    * @param unit the unit of time the timeout to be represented in
    * @return read rpc timeout in the specified time unit
@@ -809,30 +667,6 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Get timeout (millisecond) of each rpc read request in this Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getReadRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getReadRpcTimeout() {
-    return (int)getReadRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc read request in operations of this Table instance, will
-   * override the value of hbase.rpc.read.timeout in configuration.
-   * If a rpc read request waiting too long, it will stop waiting and send a new request to retry
-   * until retries exhausted or operation timeout reached.
-   *
-   * @param readRpcTimeout the timeout for read rpc request in milliseconds
-   * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
-   */
-  @Deprecated
-  default void setReadRpcTimeout(int readRpcTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
-  /**
    * Get timeout of each rpc write request in this Table instance.
    * @param unit the unit of time the timeout to be represented in
    * @return write rpc timeout in the specified time unit
@@ -842,30 +676,6 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Get timeout (millisecond) of each rpc write request in this Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getWriteRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getWriteRpcTimeout() {
-    return (int)getWriteRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc write request in operations of this Table instance, will
-   * override the value of hbase.rpc.write.timeout in configuration.
-   * If a rpc write request waiting too long, it will stop waiting and send a new request to retry
-   * until retries exhausted or operation timeout reached.
-   *
-   * @param writeRpcTimeout the timeout for write rpc request in milliseconds
-   * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
-   */
-  @Deprecated
-  default void setWriteRpcTimeout(int writeRpcTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
-  /**
    * Get timeout of each operation in Table instance.
    * @param unit the unit of time the timeout to be represented in
    * @return operation rpc timeout in the specified time unit
@@ -873,30 +683,4 @@ public interface Table extends Closeable {
   default long getOperationTimeout(TimeUnit unit) {
     throw new NotImplementedException("Add an implementation!");
   }
-
-  /**
-   * Get timeout (millisecond) of each operation for in Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getOperationTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getOperationTimeout() {
-    return (int)getOperationTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each operation in this Table instance, will override the value
-   * of hbase.client.operation.timeout in configuration.
-   * Operation timeout is a top-level restriction that makes sure a blocking method will not be
-   * blocked more than this. In each operation, if rpc request fails because of timeout or
-   * other reason, it will retry until success or throw a RetriesExhaustedException. But if the
-   * total time being blocking reach the operation timeout before retries exhausted, it will break
-   * early and throw SocketTimeoutException.
-   * @param operationTimeout the total timeout of each operation in millisecond.
-   * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
-   */
-  @Deprecated
-  default void setOperationTimeout(int operationTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
 }
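
The deprecation notes above all point at the coprocessorService methods on AsyncTable, reachable
through Connection#toAsyncConnection(). A minimal migration sketch (not part of the patch;
MyService, MyRequest and MyResponse are hypothetical generated protobuf classes):

    // Instead of the deprecated Table.coprocessorService(byte[] row) plus a blocking stub:
    AsyncConnection asyncConn = connection.toAsyncConnection();
    AsyncTable<?> asyncTable = asyncConn.getTable(TableName.valueOf("test"));
    CompletableFuture<MyResponse> future = asyncTable.coprocessorService(
        MyService::newStub, // stub maker for the (hypothetical) generated service
        (stub, controller, done) -> stub.myCall(controller, MyRequest.getDefaultInstance(), done),
        Bytes.toBytes("row")); // row is used only to locate the region, as before
    MyResponse response = future.join(); // or chain callbacks instead of blocking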
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java
new file mode 100644
index 0000000..d581611
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java
@@ -0,0 +1,532 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FutureUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.primitives.Booleans;
+
+/**
+ * The table implementation based on {@link AsyncTable}.
+ */
+@InterfaceAudience.Private
+class TableOverAsyncTable implements Table {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TableOverAsyncTable.class);
+
+  private final AsyncConnectionImpl conn;
+
+  private final AsyncTable<?> table;
+
+  private final ExecutorService pool;
+
+  TableOverAsyncTable(AsyncConnectionImpl conn, AsyncTable<?> table, ExecutorService pool) {
+    this.conn = conn;
+    this.table = table;
+    this.pool = pool;
+  }
+
+  @Override
+  public TableName getName() {
+    return table.getName();
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return table.getConfiguration();
+  }
+
+  @Override
+  public TableDescriptor getDescriptor() throws IOException {
+    return FutureUtils.get(conn.getAdmin().getDescriptor(getName()));
+  }
+
+  @Override
+  public boolean exists(Get get) throws IOException {
+    return FutureUtils.get(table.exists(get));
+  }
+
+  @Override
+  public boolean[] exists(List<Get> gets) throws IOException {
+    return Booleans.toArray(FutureUtils.get(table.existsAll(gets)));
+  }
+
+  @Override
+  public void batch(List<? extends Row> actions, Object[] results) throws IOException {
+    if (ArrayUtils.isEmpty(results)) {
+      FutureUtils.get(table.batchAll(actions));
+      return;
+    }
+    List<ThrowableWithExtraContext> errors = new ArrayList<>();
+    List<CompletableFuture<Object>> futures = table.batch(actions);
+    for (int i = 0, n = results.length; i < n; i++) {
+      try {
+        results[i] = FutureUtils.get(futures.get(i));
+      } catch (IOException e) {
+        results[i] = e;
+        errors.add(new ThrowableWithExtraContext(e, EnvironmentEdgeManager.currentTime(),
+          "Error when processing " + actions.get(i)));
+      }
+    }
+    if (!errors.isEmpty()) {
+      throw new RetriesExhaustedException(errors.size(), errors);
+    }
+  }
+
+  @Override
+  public <R> void batchCallback(List<? extends Row> actions, Object[] results, Callback<R> callback)
+      throws IOException, InterruptedException {
+    ConcurrentLinkedQueue<ThrowableWithExtraContext> errors = new ConcurrentLinkedQueue<>();
+    CountDownLatch latch = new CountDownLatch(actions.size());
+    AsyncTableRegionLocator locator = conn.getRegionLocator(getName());
+    List<CompletableFuture<R>> futures = table.<R> batch(actions);
+    for (int i = 0, n = futures.size(); i < n; i++) {
+      final int index = i;
+      FutureUtils.addListener(futures.get(i), (r, e) -> {
+        if (e != null) {
+          errors.add(new ThrowableWithExtraContext(e, EnvironmentEdgeManager.currentTime(),
+            "Error when processing " + actions.get(index)));
+          if (!ArrayUtils.isEmpty(results)) {
+            results[index] = e;
+          }
+          latch.countDown();
+        } else {
+          if (!ArrayUtils.isEmpty(results)) {
+            results[index] = r;
+          }
+          FutureUtils.addListener(locator.getRegionLocation(actions.get(index).getRow()),
+            (l, le) -> {
+              if (le != null) {
+                errors.add(new ThrowableWithExtraContext(le, EnvironmentEdgeManager.currentTime(),
+                  "Error when finding the region for row " +
+                    Bytes.toStringBinary(actions.get(index).getRow())));
+              } else {
+                callback.update(l.getRegion().getRegionName(), actions.get(index).getRow(), r);
+              }
+              latch.countDown();
+            });
+        }
+      });
+    }
+    latch.await();
+    if (!errors.isEmpty()) {
+      throw new RetriesExhaustedException(errors.size(),
+        errors.stream().collect(Collectors.toList()));
+    }
+  }
+
+  @Override
+  public Result get(Get get) throws IOException {
+    return FutureUtils.get(table.get(get));
+  }
+
+  @Override
+  public Result[] get(List<Get> gets) throws IOException {
+    return FutureUtils.get(table.getAll(gets)).toArray(new Result[0]);
+  }
+
+  @Override
+  public ResultScanner getScanner(Scan scan) throws IOException {
+    return table.getScanner(scan);
+  }
+
+  @Override
+  public ResultScanner getScanner(byte[] family) throws IOException {
+    return table.getScanner(family);
+  }
+
+  @Override
+  public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
+    return table.getScanner(family, qualifier);
+  }
+
+  @Override
+  public void put(Put put) throws IOException {
+    FutureUtils.get(table.put(put));
+  }
+
+  @Override
+  public void put(List<Put> puts) throws IOException {
+    FutureUtils.get(table.putAll(puts));
+  }
+
+  @Override
+  public void delete(Delete delete) throws IOException {
+    FutureUtils.get(table.delete(delete));
+  }
+
+  @Override
+  public void delete(List<Delete> deletes) throws IOException {
+    FutureUtils.get(table.deleteAll(deletes));
+  }
+
+  private static final class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {
+
+    private final AsyncTable.CheckAndMutateBuilder builder;
+
+    public CheckAndMutateBuilderImpl(AsyncTable.CheckAndMutateBuilder builder) {
+      this.builder = builder;
+    }
+
+    @Override
+    public CheckAndMutateBuilder qualifier(byte[] qualifier) {
+      builder.qualifier(qualifier);
+      return this;
+    }
+
+    @Override
+    public CheckAndMutateBuilder timeRange(TimeRange timeRange) {
+      builder.timeRange(timeRange);
+      return this;
+    }
+
+    @Override
+    public CheckAndMutateBuilder ifNotExists() {
+      builder.ifNotExists();
+      return this;
+    }
+
+    @Override
+    public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value) {
+      builder.ifMatches(compareOp, value);
+      return this;
+    }
+
+    @Override
+    public boolean thenPut(Put put) throws IOException {
+      return FutureUtils.get(builder.thenPut(put));
+    }
+
+    @Override
+    public boolean thenDelete(Delete delete) throws IOException {
+      return FutureUtils.get(builder.thenDelete(delete));
+    }
+
+    @Override
+    public boolean thenMutate(RowMutations mutation) throws IOException {
+      return FutureUtils.get(builder.thenMutate(mutation));
+    }
+  }
+
+  @Override
+  public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
+    return new CheckAndMutateBuilderImpl(table.checkAndMutate(row, family));
+  }
+
+  @Override
+  public void mutateRow(RowMutations rm) throws IOException {
+    FutureUtils.get(table.mutateRow(rm));
+  }
+
+  @Override
+  public Result append(Append append) throws IOException {
+    return FutureUtils.get(table.append(append));
+  }
+
+  @Override
+  public Result increment(Increment increment) throws IOException {
+    return FutureUtils.get(table.increment(increment));
+  }
+
+  @Override
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
+      throws IOException {
+    return FutureUtils.get(table.incrementColumnValue(row, family, qualifier, amount));
+  }
+
+  @Override
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
+      Durability durability) throws IOException {
+    return FutureUtils.get(table.incrementColumnValue(row, family, qualifier, amount, durability));
+  }
+
+  @Override
+  public void close() {
+  }
+
+  private static final class BlockingRpcCallback<R> implements RpcCallback<R> {
+    private R result;
+    private boolean resultSet = false;
+
+    /**
+     * Called on completion of the RPC call with the response object, or {@code null} in the case of
+     * an error.
+     * @param parameter the response object or {@code null} if an error occurred
+     */
+    @Override
+    public void run(R parameter) {
+      synchronized (this) {
+        result = parameter;
+        resultSet = true;
+        this.notifyAll();
+      }
+    }
+
+    /**
+     * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
+     * passed. When used asynchronously, this method will block until the {@link #run(Object)}
+     * method has been called.
+     * @return the response object or {@code null} if no response was passed
+     */
+    public synchronized R get() throws IOException {
+      while (!resultSet) {
+        try {
+          this.wait();
+        } catch (InterruptedException ie) {
+          InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
+          exception.initCause(ie);
+          throw exception;
+        }
+      }
+      return result;
+    }
+  }
+
+  private static final class RegionCoprocessorRpcChannel extends RegionCoprocessorRpcChannelImpl
+      implements CoprocessorRpcChannel {
+
+    RegionCoprocessorRpcChannel(AsyncConnectionImpl conn, TableName tableName, RegionInfo region,
+        byte[] row, long rpcTimeoutNs, long operationTimeoutNs) {
+      super(conn, tableName, region, row, rpcTimeoutNs, operationTimeoutNs);
+    }
+
+    @Override
+    public void callMethod(MethodDescriptor method, RpcController controller, Message request,
+        Message responsePrototype, RpcCallback<Message> done) {
+      ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
+      BlockingRpcCallback<Message> callback = new BlockingRpcCallback<>();
+      super.callMethod(method, c, request, responsePrototype, callback);
+      Message ret;
+      try {
+        ret = callback.get();
+      } catch (IOException e) {
+        setError(controller, e);
+        return;
+      }
+      if (c.failed()) {
+        setError(controller, c.getFailed());
+      }
+      done.run(ret);
+    }
+
+    @Override
+    public Message callBlockingMethod(MethodDescriptor method, RpcController controller,
+        Message request, Message responsePrototype) throws ServiceException {
+      ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
+      BlockingRpcCallback<Message> done = new BlockingRpcCallback<>();
+      callMethod(method, c, request, responsePrototype, done);
+      Message ret;
+      try {
+        ret = done.get();
+      } catch (IOException e) {
+        throw new ServiceException(e);
+      }
+      if (c.failed()) {
+        setError(controller, c.getFailed());
+        throw new ServiceException(c.getFailed());
+      }
+      return ret;
+    }
+  }
+
+  @Override
+  public RegionCoprocessorRpcChannel coprocessorService(byte[] row) {
+    return new RegionCoprocessorRpcChannel(conn, getName(), null, row,
+      getRpcTimeout(TimeUnit.NANOSECONDS), getOperationTimeout(TimeUnit.NANOSECONDS));
+  }
+
+  /**
+   * Get the corresponding start keys and regions for an arbitrary range of keys.
+   * <p>
+   * @param startKey Starting row in range, inclusive
+   * @param endKey Ending row in range
+   * @param includeEndKey true if endKey is inclusive, false if exclusive
+   * @return A pair of the list of start keys and the list of HRegionLocations that contain the
+   *         specified range
+   * @throws IOException if a remote or network exception occurs
+   */
+  private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(final byte[] startKey,
+      final byte[] endKey, final boolean includeEndKey) throws IOException {
+    return getKeysAndRegionsInRange(startKey, endKey, includeEndKey, false);
+  }
+
+  /**
+   * Get the corresponding start keys and regions for an arbitrary range of keys.
+   * <p>
+   * @param startKey Starting row in range, inclusive
+   * @param endKey Ending row in range
+   * @param includeEndKey true if endKey is inclusive, false if exclusive
+   * @param reload true to reload information or false to use cached information
+   * @return A pair of the list of start keys and the list of HRegionLocations that contain the
+   *         specified range
+   * @throws IOException if a remote or network exception occurs
+   */
+  private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(final byte[] startKey,
+      final byte[] endKey, final boolean includeEndKey, final boolean reload) throws IOException {
+    final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW);
+    if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) {
+      throw new IllegalArgumentException(
+        "Invalid range: " + Bytes.toStringBinary(startKey) + " > " + Bytes.toStringBinary(endKey));
+    }
+    List<byte[]> keysInRange = new ArrayList<>();
+    List<HRegionLocation> regionsInRange = new ArrayList<>();
+    byte[] currentKey = startKey;
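+    // Walk the table region by region from startKey until we step past endKey, or until we reach
+    // the last region of the table (whose end key is empty).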
+    do {
+      HRegionLocation regionLocation =
+        FutureUtils.get(conn.getRegionLocator(getName()).getRegionLocation(currentKey, reload));
+      keysInRange.add(currentKey);
+      regionsInRange.add(regionLocation);
+      currentKey = regionLocation.getRegion().getEndKey();
+    } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) &&
+      (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0 ||
+        (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0)));
+    return new Pair<>(keysInRange, regionsInRange);
+  }
+
+  private List<byte[]> getStartKeysInRange(byte[] start, byte[] end) throws IOException {
+    if (start == null) {
+      start = HConstants.EMPTY_START_ROW;
+    }
+    if (end == null) {
+      end = HConstants.EMPTY_END_ROW;
+    }
+    return getKeysAndRegionsInRange(start, end, true).getFirst();
+  }
+
+  @FunctionalInterface
+  private interface StubCall<R> {
+    R call(RegionCoprocessorRpcChannel channel) throws Exception;
+  }
+
+  private <R> void coprocessorService(String serviceName, byte[] startKey, byte[] endKey,
+      Callback<R> callback, StubCall<R> call) throws Throwable {
+    // get regions covered by the row range
+    List<byte[]> keys = getStartKeysInRange(startKey, endKey);
+    Map<byte[], Future<R>> futures = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    for (byte[] r : keys) {
+      RegionCoprocessorRpcChannel channel = coprocessorService(r);
+      Future<R> future = pool.submit(new Callable<R>() {
+        @Override
+        public R call() throws Exception {
+          R result = call.call(channel);
+          byte[] region = channel.getLastRegion();
+          if (callback != null) {
+            callback.update(region, r, result);
+          }
+          return result;
+        }
+      });
+      futures.put(r, future);
+    }
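+    // Wait for every per-region invocation to finish; surface the first failure encountered and
+    // translate interruption into an InterruptedIOException.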
+    for (Map.Entry<byte[], Future<R>> e : futures.entrySet()) {
+      try {
+        e.getValue().get();
+      } catch (ExecutionException ee) {
+        LOG.warn("Error calling coprocessor service " + serviceName + " for row " +
+          Bytes.toStringBinary(e.getKey()), ee);
+        throw ee.getCause();
+      } catch (InterruptedException ie) {
+        throw new InterruptedIOException("Interrupted calling coprocessor service " + serviceName +
+          " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie);
+      }
+    }
+  }
+
+  @Override
+  public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey,
+      byte[] endKey, Call<T, R> callable, Callback<R> callback) throws ServiceException, Throwable {
+    coprocessorService(service.getName(), startKey, endKey, callback, channel -> {
+      T instance = org.apache.hadoop.hbase.protobuf.ProtobufUtil.newServiceStub(service, channel);
+      return callable.call(instance);
+    });
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor,
+      Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
+      throws ServiceException, Throwable {
+    coprocessorService(methodDescriptor.getFullName(), startKey, endKey, callback, channel -> {
+      return (R) channel.callBlockingMethod(methodDescriptor, null, request, responsePrototype);
+    });
+  }
+
+  @Override
+  public long getRpcTimeout(TimeUnit unit) {
+    return table.getRpcTimeout(unit);
+  }
+
+  @Override
+  public long getReadRpcTimeout(TimeUnit unit) {
+    return table.getReadRpcTimeout(unit);
+  }
+
+  @Override
+  public long getWriteRpcTimeout(TimeUnit unit) {
+    return table.getWriteRpcTimeout(unit);
+  }
+
+  @Override
+  public long getOperationTimeout(TimeUnit unit) {
+    return table.getOperationTimeout(unit);
+  }
+
+  @Override
+  public RegionLocator getRegionLocator() throws IOException {
+    return conn.toConnection().getRegionLocator(getName());
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
index 6ae7027..43d135b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
@@ -22,12 +22,18 @@ import com.google.protobuf.RpcChannel;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * Base interface which provides clients with an RPC connection to
- * call coprocessor endpoint {@link com.google.protobuf.Service}s.
+ * Base interface which provides clients with an RPC connection to call coprocessor endpoint
+ * {@link com.google.protobuf.Service}s.
+ * <p/>
  * Note that clients should not use this class directly, except through
  * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}.
+ * <p/>
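+ * A minimal sketch of the supported usage, assuming a {@code Table} instance and a row key are
+ * already in hand; {@code MyService} stands in for a protobuf-generated coprocessor endpoint
+ * service and is named only for illustration:
+ * <pre>{@code
+ * CoprocessorRpcChannel channel = table.coprocessorService(row);
+ * MyService.BlockingInterface stub = MyService.newBlockingStub(channel);
+ * }</pre>
+ * <p/>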
+ * @deprecated Please do not use this class any more, as it is too low level and is part of the
+ *             internal rpc framework for HBase. Will be deleted in 4.0.0.
  */
+@Deprecated
 @InterfaceAudience.Public
-public interface CoprocessorRpcChannel extends RpcChannel, BlockingRpcChannel {}
+public interface CoprocessorRpcChannel extends RpcChannel, BlockingRpcChannel {
+}
 // This Interface is part of our public, client-facing API!!!
 // This belongs in client package but it is exposed in our public API so we cannot relocate.
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/SimpleRegistry.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/SimpleRegistry.java
new file mode 100644
index 0000000..4d4d620
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/SimpleRegistry.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.util.FutureUtils;
+
+/**
+ * Simple cluster registry inserted in place of our usual zookeeper based one.
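+ * <p/>
+ * A minimal sketch of how a test might wire it in (the server name below is made up for the
+ * example):
+ * <pre>{@code
+ * conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName());
+ * SimpleRegistry.setMetaHost(conf, ServerName.valueOf("meta.example.org", 16010, 12345));
+ * }</pre>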
+ */
+class SimpleRegistry extends DoNothingAsyncRegistry {
+
+  private final ServerName metaHost;
+
+  volatile boolean closed = false;
+
+  private static final String META_HOST_CONFIG_NAME = "hbase.client.simple-registry.meta.host";
+
+  private static final String DEFAULT_META_HOST = "meta.example.org,16010,12345";
+
+  public static void setMetaHost(Configuration conf, ServerName metaHost) {
+    conf.set(META_HOST_CONFIG_NAME, metaHost.getServerName());
+  }
+
+  public SimpleRegistry(Configuration conf) {
+    super(conf);
+    this.metaHost = ServerName.valueOf(conf.get(META_HOST_CONFIG_NAME, DEFAULT_META_HOST));
+  }
+
+  @Override
+  public CompletableFuture<RegionLocations> getMetaRegionLocation() {
+    if (closed) {
+      return FutureUtils.failedFuture(new DoNotRetryIOException("Client already closed"));
+    } else {
+      return CompletableFuture.completedFuture(new RegionLocations(
+        new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, metaHost)));
+    }
+  }
+
+  @Override
+  public CompletableFuture<String> getClusterId() {
+    if (closed) {
+      return FutureUtils.failedFuture(new DoNotRetryIOException("Client already closed"));
+    } else {
+      return CompletableFuture.completedFuture(HConstants.CLUSTER_ID_DEFAULT);
+    }
+  }
+
+  @Override
+  public CompletableFuture<Integer> getCurrentNrHRS() {
+    if (closed) {
+      return FutureUtils.failedFuture(new DoNotRetryIOException("Client already closed"));
+    } else {
+      return CompletableFuture.completedFuture(1);
+    }
+  }
+
+  @Override
+  public void close() {
+    closed = true;
+  }
+}
\ No newline at end of file
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 02e4c46..fca6fe1 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -1351,7 +1351,7 @@ public class TestAsyncProcess {
       ap.previousTimeout = -1;
 
       try {
-        ht.existsAll(gets);
+        ht.exists(gets);
       } catch (ClassCastException e) {
         // No result response on this test.
       }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
index 647ea32..96bb846 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;
@@ -33,12 +34,12 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-@Category({SmallTests.class, ClientTests.class})
+@Category({ SmallTests.class, ClientTests.class })
 public class TestBufferedMutator {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestBufferedMutator.class);
+    HBaseClassTestRule.forClass(TestBufferedMutator.class);
 
   @Rule
   public TestName name = new TestName();
@@ -55,10 +56,12 @@ public class TestBufferedMutator {
 
   @Test
   public void testAlternateBufferedMutatorImpl() throws IOException {
-    BufferedMutatorParams params =  new BufferedMutatorParams(TableName.valueOf(name.getMethodName()));
+    BufferedMutatorParams params =
+      new BufferedMutatorParams(TableName.valueOf(name.getMethodName()));
     Configuration conf = HBaseConfiguration.create();
     conf.set(AsyncRegistryFactory.REGISTRY_IMPL_CONF_KEY, DoNothingAsyncRegistry.class.getName());
-    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+    try (ConnectionImplementation connection = ConnectionFactory.createConnectionImpl(conf, null,
+      UserProvider.instantiate(conf).getCurrent())) {
       BufferedMutator bm = connection.getBufferedMutator(params);
       // Assert we get default BM if nothing specified.
       assertTrue(bm instanceof BufferedMutatorImpl);
@@ -70,7 +73,8 @@ public class TestBufferedMutator {
     // Now try creating a Connection after setting an alterate BufferedMutator into
     // the configuration and confirm we get what was expected.
     conf.set(BufferedMutator.CLASSNAME_KEY, MyBufferedMutator.class.getName());
-    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+    try (Connection connection = ConnectionFactory.createConnectionImpl(conf, null,
+      UserProvider.instantiate(conf).getCurrent())) {
       BufferedMutator bm = connection.getBufferedMutator(params);
       assertTrue(bm instanceof MyBufferedMutator);
     }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 3cab09d..fd3a4f8 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -28,7 +28,6 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Random;
 import java.util.SortedMap;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -43,10 +42,8 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -118,37 +115,11 @@ public class TestClientNoCluster extends Configured implements Tool {
   @Before
   public void setUp() throws Exception {
     this.conf = HBaseConfiguration.create();
-    // Run my Connection overrides.  Use my little ConnectionImplementation below which
+    // Run my Connection overrides. Use my little ConnectionImplementation below which
     // allows me to insert mocks and also use my Registry below rather than the default zk based
     // one so tests run faster and don't have zk dependency.
     this.conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName());
-  }
-
-  /**
-   * Simple cluster registry inserted in place of our usual zookeeper based one.
-   */
-  static class SimpleRegistry extends DoNothingAsyncRegistry {
-    final ServerName META_HOST = META_SERVERNAME;
-
-    public SimpleRegistry(Configuration conf) {
-      super(conf);
-    }
-
-    @Override
-    public CompletableFuture<RegionLocations> getMetaRegionLocation() {
-      return CompletableFuture.completedFuture(new RegionLocations(
-          new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, META_HOST)));
-    }
-
-    @Override
-    public CompletableFuture<String> getClusterId() {
-      return CompletableFuture.completedFuture(HConstants.CLUSTER_ID_DEFAULT);
-    }
-
-    @Override
-    public CompletableFuture<Integer> getCurrentNrHRS() {
-      return CompletableFuture.completedFuture(1);
-    }
+    SimpleRegistry.setMetaHost(conf, META_SERVERNAME);
   }
 
   /**
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index c9f5a2e..96b8e9f 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
@@ -521,7 +522,7 @@ public class TestHFileOutputFormat2  {
     RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
     setupMockStartKeys(regionLocator);
     setupMockTableName(regionLocator);
-    HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+    HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
     assertEquals(job.getNumReduceTasks(), 4);
   }
 
@@ -631,7 +632,7 @@ public class TestHFileOutputFormat2  {
       assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);
 
       allTables.put(tableStrSingle, table);
-      tableInfo.add(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), r));
+      tableInfo.add(new HFileOutputFormat2.TableInfo(table.getDescriptor(), r));
     }
     Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
     // Generate the bulk load files
@@ -817,7 +818,7 @@ public class TestHFileOutputFormat2  {
       conf.set(HFileOutputFormat2.COMPRESSION_FAMILIES_CONF_KEY,
               HFileOutputFormat2.serializeColumnFamilyAttribute
                       (HFileOutputFormat2.compressionDetails,
-                              Arrays.asList(table.getTableDescriptor())));
+                              Arrays.asList(table.getDescriptor())));
 
       // read back family specific compression setting from the configuration
       Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
@@ -843,7 +844,7 @@ public class TestHFileOutputFormat2  {
           .setBlockCacheEnabled(false)
           .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }
 
   /**
@@ -889,7 +890,7 @@ public class TestHFileOutputFormat2  {
           familyToBloomType);
       conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY,
               HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails,
-              Arrays.asList(table.getTableDescriptor())));
+              Arrays.asList(table.getDescriptor())));
 
       // read back family specific data block encoding settings from the
       // configuration
@@ -917,7 +918,7 @@ public class TestHFileOutputFormat2  {
           .setBlockCacheEnabled(false)
           .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }
 
   /**
@@ -961,7 +962,7 @@ public class TestHFileOutputFormat2  {
       conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY,
               HFileOutputFormat2.serializeColumnFamilyAttribute
                       (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table
-                              .getTableDescriptor())));
+                              .getDescriptor())));
 
       // read back family specific data block encoding settings from the
       // configuration
@@ -990,7 +991,7 @@ public class TestHFileOutputFormat2  {
           .setBlockCacheEnabled(false)
           .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }
 
   /**
@@ -1035,7 +1036,7 @@ public class TestHFileOutputFormat2  {
       Table table = Mockito.mock(Table.class);
       setupMockColumnFamiliesForDataBlockEncoding(table,
           familyToDataBlockEncoding);
-      HTableDescriptor tableDescriptor = table.getTableDescriptor();
+      TableDescriptor tableDescriptor = table.getDescriptor();
       conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
               HFileOutputFormat2.serializeColumnFamilyAttribute
                       (HFileOutputFormat2.dataBlockEncodingDetails, Arrays
@@ -1067,7 +1068,7 @@ public class TestHFileOutputFormat2  {
           .setBlockCacheEnabled(false)
           .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }
 
   /**
@@ -1125,7 +1126,7 @@ public class TestHFileOutputFormat2  {
     Table table = Mockito.mock(Table.class);
     RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
     HTableDescriptor htd = new HTableDescriptor(TABLE_NAMES[0]);
-    Mockito.doReturn(htd).when(table).getTableDescriptor();
+    Mockito.doReturn(htd).when(table).getDescriptor();
     for (HColumnDescriptor hcd: HBaseTestingUtility.generateColumnDescriptors()) {
       htd.addFamily(hcd);
     }
@@ -1145,7 +1146,7 @@ public class TestHFileOutputFormat2  {
       Job job = new Job(conf, "testLocalMRIncrementalLoad");
       job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
       setupRandomGeneratorMapper(job, false);
-      HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+      HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
       FileOutputFormat.setOutputPath(job, dir);
       context = createTestTaskAttemptContext(job);
       HFileOutputFormat2 hof = new HFileOutputFormat2();
@@ -1411,10 +1412,8 @@ public class TestHFileOutputFormat2  {
           Admin admin = c.getAdmin();
           RegionLocator regionLocator = c.getRegionLocator(tname)) {
         Path outDir = new Path("incremental-out");
-        runIncrementalPELoad(conf,
-          Arrays
-            .asList(new HFileOutputFormat2.TableInfo(admin.getDescriptor(tname), regionLocator)),
-          outDir, false);
+        runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(admin
+                .getDescriptor(tname), regionLocator)), outDir, false);
       }
     } else {
       throw new RuntimeException(
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
index eff26d7..af97793 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
@@ -240,5 +241,10 @@ public class TestMultiTableInputFormatBase {
     @Override
     public void clearRegionLocationCache() {
     }
+
+    @Override
+    public AsyncConnection toAsyncConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
index 944bd10..5fd5ccf 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
@@ -290,5 +291,10 @@ public class TestTableInputFormatBase {
     @Override
     public void clearRegionLocationCache() {
     }
+
+    @Override
+    public AsyncConnection toAsyncConnection() {
+      throw new UnsupportedOperationException();
+    }
   }
 }
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
index ecfe86d..a0deb7e 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,15 +19,13 @@
 package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
-
 import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Response;
-
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class ResourceBase implements Constants {
@@ -82,10 +79,9 @@ public class ResourceBase implements Constants {
               StringUtils.stringifyException(exp) + CRLF)
             .build());
     }
-    if (exp instanceof RetriesExhaustedWithDetailsException) {
-      RetriesExhaustedWithDetailsException retryException =
-          (RetriesExhaustedWithDetailsException) exp;
-      processException(retryException.getCause(0));
+    if (exp instanceof RetriesExhaustedException) {
+      RetriesExhaustedException retryException = (RetriesExhaustedException) exp;
+      processException(retryException.getCause());
     }
     throw new WebApplicationException(
       Response.status(Response.Status.SERVICE_UNAVAILABLE)
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
index 786fcb6..dcfe771 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.util.Map;
-
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
@@ -35,20 +34,19 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.UriInfo;
 import javax.xml.namespace.QName;
-
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
 import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public class SchemaResource extends ResourceBase {
@@ -73,13 +71,9 @@ public class SchemaResource extends ResourceBase {
     this.tableResource = tableResource;
   }
 
-  private HTableDescriptor getTableSchema() throws IOException,
-      TableNotFoundException {
-    Table table = servlet.getTable(tableResource.getName());
-    try {
-      return table.getTableDescriptor();
-    } finally {
-      table.close();
+  private HTableDescriptor getTableSchema() throws IOException, TableNotFoundException {
+    try (Table table = servlet.getTable(tableResource.getName())) {
+      return new HTableDescriptor(table.getDescriptor());
     }
   }
 
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index bdb3838..6e7d69c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -22,20 +22,27 @@ import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
-
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -63,19 +70,9 @@ import org.apache.hadoop.hbase.rest.model.ScannerModel;
 import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 
@@ -257,36 +254,6 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  @Deprecated
-  public HTableDescriptor getTableDescriptor() throws IOException {
-    StringBuilder sb = new StringBuilder();
-    sb.append('/');
-    sb.append(Bytes.toString(name));
-    sb.append('/');
-    sb.append("schema");
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
-      int code = response.getCode();
-      switch (code) {
-      case 200:
-        TableSchemaModel schema = new TableSchemaModel();
-        schema.getObjectFromMessage(response.getBody());
-        return schema.getTableDescriptor();
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("schema request returned " + code);
-      }
-    }
-    throw new IOException("schema request timed out");
-  }
-
-  @Override
   public void close() throws IOException {
     client.shutdown();
   }
@@ -316,12 +283,13 @@ public class RemoteHTable implements Table {
     int maxVersions = 1;
     int count = 0;
 
-    for(Get g:gets) {
+    for (Get g : gets) {
 
-      if ( count == 0 ) {
+      if (count == 0) {
         maxVersions = g.getMaxVersions();
       } else if (g.getMaxVersions() != maxVersions) {
-        LOG.warn("MaxVersions on Gets do not match, using the first in the list ("+maxVersions+")");
+        LOG.warn(
+          "MaxVersions on Gets do not match, using the first in the list (" + maxVersions + ")");
       }
 
       if (g.getFilter() != null) {
@@ -329,7 +297,7 @@ public class RemoteHTable implements Table {
       }
 
       rows[count] = g.getRow();
-      count ++;
+      count++;
     }
 
     String spec = buildMultiRowSpec(rows, maxVersions);
@@ -346,7 +314,7 @@ public class RemoteHTable implements Table {
           CellSetModel model = new CellSetModel();
           model.getObjectFromMessage(response.getBody());
           Result[] results = buildResultFromModel(model);
-          if ( results.length > 0) {
+          if (results.length > 0) {
             return results;
           }
           // fall through
@@ -357,7 +325,7 @@ public class RemoteHTable implements Table {
           try {
             Thread.sleep(sleepTime);
           } catch (InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
           }
           break;
         default:
@@ -393,21 +361,21 @@ public class RemoteHTable implements Table {
     sb.append('/');
     sb.append(toURLEncodedBytes(put.getRow()));
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
-        model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
-      case 200:
-        return;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("put request failed with " + code);
+        case 200:
+          return;
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("put request failed with " + code);
       }
     }
     throw new IOException("put request timed out");
@@ -419,24 +387,24 @@ public class RemoteHTable implements Table {
     // ignores the row specification in the URI
 
     // separate puts by row
-    TreeMap<byte[],List<Cell>> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for (Put put: puts) {
+    TreeMap<byte[], List<Cell>> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    for (Put put : puts) {
       byte[] row = put.getRow();
       List<Cell> cells = map.get(row);
       if (cells == null) {
         cells = new ArrayList<>();
         map.put(row, cells);
       }
-      for (List<Cell> l: put.getFamilyCellMap().values()) {
+      for (List<Cell> l : put.getFamilyCellMap().values()) {
         cells.addAll(l);
       }
     }
 
     // build the cell set
     CellSetModel model = new CellSetModel();
-    for (Map.Entry<byte[], List<Cell>> e: map.entrySet()) {
+    for (Map.Entry<byte[], List<Cell>> e : map.entrySet()) {
       RowModel row = new RowModel(e.getKey());
-      for (Cell cell: e.getValue()) {
+      for (Cell cell : e.getValue()) {
         row.addCell(new CellModel(cell));
       }
       model.addRow(row);
@@ -448,21 +416,21 @@ public class RemoteHTable implements Table {
     sb.append(Bytes.toString(name));
     sb.append("/$multiput"); // can be any nonexistent row
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
-        model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
-      case 200:
-        return;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("multiput request failed with " + code);
+        case 200:
+          return;
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("multiput request failed with " + code);
       }
     }
     throw new IOException("multiput request timed out");
@@ -505,7 +473,31 @@ public class RemoteHTable implements Table {
 
   @Override
   public TableDescriptor getDescriptor() throws IOException {
-    return getTableDescriptor();
+    StringBuilder sb = new StringBuilder();
+    sb.append('/');
+    sb.append(Bytes.toString(name));
+    sb.append('/');
+    sb.append("schema");
+    for (int i = 0; i < maxRetries; i++) {
+      Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
+      int code = response.getCode();
+      switch (code) {
+        case 200:
+          TableSchemaModel schema = new TableSchemaModel();
+          schema.getObjectFromMessage(response.getBody());
+          return schema.getTableDescriptor();
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("schema request returned " + code);
+      }
+    }
+    throw new IOException("schema request timed out");
   }
 
   class Scanner implements ResultScanner {
@@ -671,13 +663,6 @@ public class RemoteHTable implements Table {
     return true;
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Put put) throws IOException {
-    return doCheckAndPut(row, family, qualifier, value, put);
-  }
-
   private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier,
       byte[] value, Put put) throws IOException {
     // column to check-the-value
@@ -714,19 +699,6 @@ public class RemoteHTable implements Table {
     throw new IOException("checkAndPut request timed out");
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-                             CompareOperator compareOp, byte[] value, Put put) throws IOException {
-    throw new IOException("checkAndPut for non-equal comparison not implemented");
-  }
-
-  @Override
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, value, delete);
-  }
-
   private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier,
       byte[] value, Delete delete) throws IOException {
     Put put = new Put(row);
@@ -765,25 +737,11 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  @Deprecated
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-                                CompareOperator compareOp, byte[] value, Delete delete) throws IOException {
-    throw new IOException("checkAndDelete for non-equal comparison not implemented");
-  }
-
-  @Override
   public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
     return new CheckAndMutateBuilderImpl(row, family);
   }
 
   @Override
-  @Deprecated
-  public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, RowMutations rm) throws IOException {
-    throw new UnsupportedOperationException("checkAndMutate not implemented");
-  }
-
-  @Override
   public Result increment(Increment increment) throws IOException {
     throw new IOException("Increment not supported");
   }
@@ -856,69 +814,21 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  @Deprecated
-  public void setOperationTimeout(int operationTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public int getOperationTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setRpcTimeout(int rpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public long getReadRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  @Deprecated
-  public int getRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public long getRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  @Deprecated
-  public int getReadRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setReadRpcTimeout(int readRpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  @Deprecated
-  public int getWriteRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setWriteRpcTimeout(int writeRpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public long getOperationTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
index da09473..28d941c 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
@@ -377,22 +377,20 @@ public class TestScannerResource {
     assertEquals(404, response.getCode());
   }
 
-  // performs table scan during which the underlying table is disabled
-  // assert that we get 410 (Gone)
   @Test
   public void testTableScanWithTableDisable() throws IOException {
+    TEST_UTIL.getAdmin().disableTable(TABLE_TO_BE_DISABLED);
     ScannerModel model = new ScannerModel();
     model.addColumn(Bytes.toBytes(COLUMN_1));
     model.setCaching(1);
     Response response = client.put("/" + TABLE_TO_BE_DISABLED + "/scanner",
       Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+    // we will see the exception when we actually want to get the result.
     assertEquals(201, response.getCode());
     String scannerURI = response.getLocation();
     assertNotNull(scannerURI);
-    TEST_UTIL.getAdmin().disableTable(TABLE_TO_BE_DISABLED);
-      response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
-    assertTrue("got " + response.getCode(), response.getCode() == 410);
+    response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
+    assertEquals(410, response.getCode());
   }
-
 }
 
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 3dae90c..1d7a37c 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.rest.HBaseRESTTestingUtility;
 import org.apache.hadoop.hbase.rest.RESTServlet;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -155,8 +156,8 @@ public class TestRemoteTable {
     Table table = null;
     try {
       table = TEST_UTIL.getConnection().getTable(TABLE);
-      HTableDescriptor local = table.getTableDescriptor();
-      assertEquals(remoteTable.getTableDescriptor(), local);
+      TableDescriptor local = table.getDescriptor();
+      assertEquals(remoteTable.getDescriptor(), new HTableDescriptor(local));
     } finally {
       if (null != table) table.close();
     }
@@ -505,7 +506,7 @@ public class TestRemoteTable {
     assertTrue(Bytes.equals(VALUE_1, value1));
     assertNull(value2);
     assertTrue(remoteTable.exists(get));
-    assertEquals(1, remoteTable.existsAll(Collections.singletonList(get)).length);
+    assertEquals(1, remoteTable.exists(Collections.singletonList(get)).length);
     Delete delete = new Delete(ROW_1);
 
     remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java
similarity index 53%
copy from hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
copy to hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java
index d5fc58e..0f05b21 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java
@@ -18,138 +18,95 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ipc.RpcClient;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
-import org.apache.hadoop.security.token.Token;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * Can be overridden in UT if you only want to implement part of the methods in
- * {@link AsyncClusterConnection}.
+ * Wraps an {@link AsyncConnection} so that it cannot be closed.
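+ * <p/>
+ * {@link #close()} intentionally throws an {@link UnsupportedOperationException}; every other
+ * method simply delegates to the wrapped connection.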
  */
-public class DummyAsyncClusterConnection implements AsyncClusterConnection {
+@InterfaceAudience.Private
+public class SharedAsyncConnection implements AsyncConnection {
+
+  private final AsyncConnection conn;
+
+  public SharedAsyncConnection(AsyncConnection conn) {
+    this.conn = conn;
+  }
+
+  @Override
+  public boolean isClosed() {
+    return conn.isClosed();
+  }
+
+  @Override
+  public void close() throws IOException {
+    throw new UnsupportedOperationException("Shared connection");
+  }
 
   @Override
   public Configuration getConfiguration() {
-    return null;
+    return conn.getConfiguration();
   }
 
   @Override
   public AsyncTableRegionLocator getRegionLocator(TableName tableName) {
-    return null;
+    return conn.getRegionLocator(tableName);
   }
 
   @Override
   public void clearRegionLocationCache() {
+    conn.clearRegionLocationCache();
   }
 
   @Override
   public AsyncTableBuilder<AdvancedScanResultConsumer> getTableBuilder(TableName tableName) {
-    return null;
+    return conn.getTableBuilder(tableName);
   }
 
   @Override
   public AsyncTableBuilder<ScanResultConsumer> getTableBuilder(TableName tableName,
       ExecutorService pool) {
-    return null;
+    return conn.getTableBuilder(tableName, pool);
   }
 
   @Override
   public AsyncAdminBuilder getAdminBuilder() {
-    return null;
+    return conn.getAdminBuilder();
   }
 
   @Override
   public AsyncAdminBuilder getAdminBuilder(ExecutorService pool) {
-    return null;
+    return conn.getAdminBuilder(pool);
   }
 
   @Override
   public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName) {
-    return null;
+    return conn.getBufferedMutatorBuilder(tableName);
   }
 
   @Override
   public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName,
       ExecutorService pool) {
-    return null;
+    return conn.getBufferedMutatorBuilder(tableName, pool);
   }
 
   @Override
   public CompletableFuture<Hbck> getHbck() {
-    return null;
+    return conn.getHbck();
   }
 
   @Override
   public Hbck getHbck(ServerName masterServer) throws IOException {
-    return null;
-  }
-
-  @Override
-  public boolean isClosed() {
-    return false;
+    return conn.getHbck(masterServer);
   }
 
   @Override
-  public void close() throws IOException {
+  public Connection toConnection() {
+    return new SharedConnection(conn.toConnection());
   }
 
-  @Override
-  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
-    return null;
-  }
-
-  @Override
-  public NonceGenerator getNonceGenerator() {
-    return null;
-  }
-
-  @Override
-  public RpcClient getRpcClient() {
-    return null;
-  }
-
-  @Override
-  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
-      boolean writeFlushWALMarker) {
-    return null;
-  }
-
-  @Override
-  public CompletableFuture<Long> replay(TableName tableName, byte[] encodedRegionName, byte[] row,
-      List<Entry> entries, int replicaId, int numRetries, long operationTimeoutNs) {
-    return null;
-  }
-
-  @Override
-  public CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
-      boolean reload) {
-    return null;
-  }
-
-  @Override
-  public CompletableFuture<String> prepareBulkLoad(TableName tableName) {
-    return null;
-  }
-
-  @Override
-  public CompletableFuture<Boolean> bulkLoad(TableName tableName,
-      List<Pair<byte[], String>> familyPaths, byte[] row, boolean assignSeqNum, Token<?> userToken,
-      String bulkToken, boolean copyFiles) {
-    return null;
-  }
-
-  @Override
-  public CompletableFuture<Void> cleanupBulkLoad(TableName tableName, String bulkToken) {
-    return null;
-  }
 }
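
Illustrative sketch (not part of this patch): the wrapper above delegates every call and
refuses close(), so a hosting server can hand its connection to coprocessors and other
hosted code without letting them shut down its RPC machinery. A minimal example, assuming
the public constructor shown above (SharedAsyncConnection is a server-internal,
InterfaceAudience.Private class):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SharedAsyncConnection;

public class SharedAsyncConnectionSketch {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection real =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      // Hosted code only ever sees the wrapper: delegated calls work as usual, but
      // close() throws UnsupportedOperationException, so the shared connection stays open.
      AsyncConnection shared = new SharedAsyncConnection(real);
      System.out.println("shared view closed? " + shared.isClosed());
    }
  }
}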
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SharedConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java
similarity index 85%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/SharedConnection.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java
index de0c39b..f189a2a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SharedConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java
@@ -15,22 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase;
+package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.concurrent.ExecutorService;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.BufferedMutator;
-import org.apache.hadoop.hbase.client.BufferedMutatorParams;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Hbck;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.TableBuilder;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * Wraps a Connection to make it can't be closed or aborted.
+ * Wraps a {@link Connection} so that it cannot be closed or aborted.
  */
 @InterfaceAudience.Private
 public class SharedConnection implements Connection {
@@ -105,4 +100,9 @@ public class SharedConnection implements Connection {
   public Hbck getHbck(ServerName masterServer) throws IOException {
     return conn.getHbck(masterServer);
   }
+
+  @Override
+  public AsyncConnection toAsyncConnection() {
+    return new SharedAsyncConnection(conn.toAsyncConnection());
+  }
 }
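
Illustrative sketch (not part of this patch): with toConnection() and toAsyncConnection()
the two wrappers round-trip into each other, so code holding either view of the shared
connection can bridge to the other without gaining the ability to close it. A minimal
example, assuming the toAsyncConnection() method this branch adds to the Connection
interface:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectionBridgingSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // A SharedConnection hands back a SharedAsyncConnection here, so the async view
      // cannot be closed behind the owner's back either.
      AsyncConnection async = conn.toAsyncConnection();
      System.out.println("async view closed? " + async.isClosed());
    }
  }
}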
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 204e380..230e02e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1857,7 +1857,7 @@ public class HMaster extends HRegionServer implements MasterServices {
         List<NormalizationPlan> plans = this.normalizer.computePlanForTable(table);
         if (plans != null) {
           for (NormalizationPlan plan : plans) {
-            plan.execute(connection.getAdmin());
+            plan.execute(asyncClusterConnection.toConnection().getAdmin());
             if (plan.getType() == PlanType.SPLIT) {
               splitPlanCount++;
             } else if (plan.getType() == PlanType.MERGE) {
@@ -3058,9 +3058,6 @@ public class HMaster extends HRegionServer implements MasterServices {
     // this is what we want especially if the Master is in startup phase doing call outs to
     // hbase:meta, etc. when cluster is down. Without this connection close, we'd have to wait on
     // the rpc to timeout.
-    if (this.connection != null) {
-      this.connection.close();
-    }
     if (this.asyncClusterConnection != null) {
       this.asyncClusterConnection.close();
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 1ee548b..d4ad954 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -28,12 +28,12 @@ import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.SharedConnection;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.SharedConnection;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 7141b87..78cd5d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -157,6 +158,7 @@ import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.JvmPauseMonitor;
 import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig;
@@ -276,19 +278,6 @@ public class HRegionServer extends HasThread implements
   protected HeapMemoryManager hMemManager;
 
   /**
-   * Connection to be shared by services.
-   * <p/>
-   * Initialized at server startup and closed when server shuts down.
-   * <p/>
-   * Clients must never close it explicitly.
-   * <p/>
-   * Clients hosted by this Server should make use of this connection rather than create their own;
-   * if they create their own, there is no way for the hosting server to shutdown ongoing client
-   * RPCs.
-   */
-  protected Connection connection;
-
-  /**
    * The asynchronous cluster connection to be shared by services.
    */
   protected AsyncClusterConnection asyncClusterConnection;
@@ -828,29 +817,7 @@ public class HRegionServer extends HasThread implements
   }
 
   /**
-   * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
-   * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
-   */
-  private Connection createConnection() throws IOException {
-    // Create a cluster connection that when appropriate, can short-circuit and go directly to the
-    // local server if the request is to the local server bypassing RPC. Can be used for both local
-    // and remote invocations.
-    Connection conn =
-      ConnectionUtils.createShortCircuitConnection(unsetClientZookeeperQuorum(), null,
-        userProvider.getCurrent(), serverName, rpcServices, rpcServices);
-    // This is used to initialize the batch thread pool inside the connection implementation.
-    // When deploy a fresh cluster, we may first use the cluster connection in InitMetaProcedure,
-    // which will be executed inside the PEWorker, and then the batch thread pool will inherit the
-    // thread group of PEWorker, which will be destroy when shutting down the ProcedureExecutor. It
-    // will cause lots of procedure related UTs to fail, so here let's initialize it first, no harm.
-    conn.getTable(TableName.META_TABLE_NAME).close();
-    return conn;
-  }
-
-  /**
    * Run test on configured codecs to make sure supporting libs are in place.
-   * @param c
-   * @throws IOException
    */
   private static void checkCodecs(final Configuration c) throws IOException {
     // check to see if the codec list is available:
@@ -872,11 +839,12 @@ public class HRegionServer extends HasThread implements
    * Setup our cluster connection if not already initialized.
    */
   protected final synchronized void setupClusterConnection() throws IOException {
-    if (connection == null) {
-      connection = createConnection();
+    if (asyncClusterConnection == null) {
+      Configuration conf = unsetClientZookeeperQuorum();
+      InetSocketAddress localAddress = new InetSocketAddress(this.rpcServices.isa.getAddress(), 0);
+      User user = userProvider.getCurrent();
       asyncClusterConnection =
-        ClusterConnectionFactory.createAsyncClusterConnection(unsetClientZookeeperQuorum(),
-          new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), userProvider.getCurrent());
+        ClusterConnectionFactory.createAsyncClusterConnection(conf, localAddress, user);
     }
   }
 
@@ -1130,15 +1098,6 @@ public class HRegionServer extends HasThread implements
       LOG.info("stopping server " + this.serverName);
     }
 
-    if (this.connection != null && !connection.isClosed()) {
-      try {
-        this.connection.close();
-      } catch (IOException e) {
-        // Although the {@link Closeable} interface throws an {@link
-        // IOException}, in reality, the implementation would never do that.
-        LOG.warn("Attempt to close server's short circuit ClusterConnection failed.", e);
-      }
-    }
     if (this.asyncClusterConnection != null) {
       try {
         this.asyncClusterConnection.close();
@@ -2203,7 +2162,7 @@ public class HRegionServer extends HasThread implements
 
   @Override
   public Connection getConnection() {
-    return this.connection;
+    return getAsyncConnection().toConnection();
   }
 
   @Override
@@ -2309,8 +2268,8 @@ public class HRegionServer extends HasThread implements
           }
         } else {
           try {
-            MetaTableAccessor.updateRegionLocation(connection,
-              hris[0], serverName, openSeqNum, masterSystemTime);
+            MetaTableAccessor.updateRegionLocation(asyncClusterConnection.toConnection(), hris[0],
+              serverName, openSeqNum, masterSystemTime);
           } catch (IOException e) {
             LOG.info("Failed to update meta", e);
             return false;
@@ -2340,7 +2299,7 @@ public class HRegionServer extends HasThread implements
     // Keep looping till we get an error. We want to send reports even though server is going down.
     // Only go down if the cluster connection is null. It is set to null almost as the last
     // thing the HRegionServer does as it goes down.
-    while (this.connection != null && !this.connection.isClosed()) {
+    while (this.asyncClusterConnection != null && !this.asyncClusterConnection.isClosed()) {
       RegionServerStatusService.BlockingInterface rss = rssStub;
       try {
         if (rss == null) {
@@ -3834,7 +3793,7 @@ public class HRegionServer extends HasThread implements
 
   @Override
   public void unassign(byte[] regionName) throws IOException {
-    connection.getAdmin().unassign(regionName, false);
+    FutureUtils.get(asyncClusterConnection.getAdmin().unassign(regionName, false));
   }
 
   @Override
@@ -3883,8 +3842,7 @@ public class HRegionServer extends HasThread implements
   @Override
   public Connection createConnection(Configuration conf) throws IOException {
     User user = UserProvider.instantiate(conf).getCurrent();
-    return ConnectionUtils.createShortCircuitConnection(conf, null, user, this.serverName,
-        this.rpcServices, this.rpcServices);
+    return ConnectionFactory.createConnection(conf, null, user);
   }
 
   public void executeProcedure(long procId, RSProcedureCallable callable) {
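
Illustrative sketch (not part of this patch) of the synchronous-over-async pattern the
unassign() change above relies on: FutureUtils.get blocks on the CompletableFuture
returned by the AsyncAdmin call and rethrows failures as IOException, so the old
synchronous signature survives without a blocking Connection underneath. The region name
is a caller-supplied placeholder:

import java.io.IOException;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.util.FutureUtils;

public final class UnassignSketch {
  // Synchronously unassign a region using only the async connection.
  static void unassign(AsyncConnection conn, byte[] regionName) throws IOException {
    FutureUtils.get(conn.getAdmin().unassign(regionName, false));
  }
}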
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 16fd332..1506ed5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.RawCellBuilder;
 import org.apache.hadoop.hbase.RawCellBuilderFactory;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.SharedConnection;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
@@ -52,6 +51,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.SharedConnection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
 import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
index 42a4e00..f15312a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
@@ -25,8 +25,8 @@ import com.google.protobuf.Service;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.SharedConnection;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.SharedConnection;
 import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 1dfd7e6..0ebca1b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2219,15 +2219,14 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
     if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) {
       // Otherwise, if the requestor has ADMIN or CREATE privs for all listed tables, the
       // request can be granted.
-      TableName [] sns = null;
       try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) {
-        sns = admin.listTableNames();
-        if (sns == null) return;
-        for (TableName tableName: tableNamesList) {
+        for (TableName tableName : tableNamesList) {
           // Skip checks for a table that does not exist
-          if (!admin.tableExists(tableName)) continue;
-          requirePermission(ctx, "getTableDescriptors", tableName, null, null,
-            Action.ADMIN, Action.CREATE);
+          if (!admin.tableExists(tableName)) {
+            continue;
+          }
+          requirePermission(ctx, "getTableDescriptors", tableName, null, null, Action.ADMIN,
+            Action.CREATE);
         }
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
deleted file mode 100644
index d095fa3..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Row;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Provides ability to create multiple Connection instances and allows to process a batch of
- * actions using CHTable.doBatchWithCallback()
- */
-@InterfaceAudience.Private
-public class MultiHConnection {
-  private static final Logger LOG = LoggerFactory.getLogger(MultiHConnection.class);
-  private Connection[] connections;
-  private final Object connectionsLock =  new Object();
-  private final int noOfConnections;
-  private ExecutorService batchPool;
-
-  /**
-   * Create multiple Connection instances and initialize a thread pool executor
-   * @param conf configuration
-   * @param noOfConnections total no of Connections to create
-   * @throws IOException if IO failure occurs
-   */
-  public MultiHConnection(Configuration conf, int noOfConnections)
-      throws IOException {
-    this.noOfConnections = noOfConnections;
-    synchronized (this.connectionsLock) {
-      connections = new Connection[noOfConnections];
-      for (int i = 0; i < noOfConnections; i++) {
-        Connection conn = ConnectionFactory.createConnection(conf);
-        connections[i] = conn;
-      }
-    }
-    createBatchPool(conf);
-  }
-
-  /**
-   * Close the open connections and shutdown the batchpool
-   */
-  public void close() {
-    synchronized (connectionsLock) {
-      if (connections != null) {
-        for (Connection conn : connections) {
-          if (conn != null) {
-            try {
-              conn.close();
-            } catch (IOException e) {
-              LOG.info("Got exception in closing connection", e);
-            } finally {
-              conn = null;
-            }
-          }
-        }
-        connections = null;
-      }
-    }
-    if (this.batchPool != null && !this.batchPool.isShutdown()) {
-      this.batchPool.shutdown();
-      try {
-        if (!this.batchPool.awaitTermination(10, TimeUnit.SECONDS)) {
-          this.batchPool.shutdownNow();
-        }
-      } catch (InterruptedException e) {
-        this.batchPool.shutdownNow();
-      }
-    }
-
-  }
-
-  /**
-   * Randomly pick a connection and process the batch of actions for a given table
-   * @param actions the actions
-   * @param tableName table name
-   * @param results the results array
-   * @param callback to run when results are in
-   * @throws IOException If IO failure occurs
-   */
-  public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName,
-      Object[] results, Batch.Callback<R> callback) throws IOException {
-    // Currently used by RegionStateStore
-    HTable.doBatchWithCallback(actions, results, callback,
-      connections[ThreadLocalRandom.current().nextInt(noOfConnections)], batchPool, tableName);
-  }
-
-  // Copied from ConnectionImplementation.getBatchPool()
-  // We should get rid of this when Connection.processBatchCallback is un-deprecated and provides
-  // an API to manage a batch pool
-  private void createBatchPool(Configuration conf) {
-    // Use the same config for keep alive as in ConnectionImplementation.getBatchPool();
-    int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256);
-    if (maxThreads == 0) {
-      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
-    }
-    long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60);
-    LinkedBlockingQueue<Runnable> workQueue =
-        new LinkedBlockingQueue<>(maxThreads
-            * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-              HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-    ThreadPoolExecutor tpe =
-        new ThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue,
-            Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-"));
-    tpe.allowCoreThreadTimeOut(true);
-    this.batchPool = tpe;
-  }
-  
-}
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index e52d8b2..14d233a 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -31,6 +31,7 @@
   import="java.util.TreeMap"
   import="org.apache.commons.lang3.StringEscapeUtils"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.hbase.HTableDescriptor"
   import="org.apache.hadoop.hbase.HColumnDescriptor"
   import="org.apache.hadoop.hbase.HConstants"
   import="org.apache.hadoop.hbase.HRegionLocation"
@@ -131,7 +132,7 @@
 if ( fqtn != null ) {
   try {
   table = master.getConnection().getTable(TableName.valueOf(fqtn));
-  if (table.getTableDescriptor().getRegionReplication() > 1) {
+  if (table.getDescriptor().getRegionReplication() > 1) {
     tableHeader = "<h2>Table Regions</h2><table id=\"tableRegionTable\" class=\"tablesorter table table-striped\" style=\"table-layout: fixed; word-wrap: break-word;\"><thead><tr><th>Name</th><th>Region Server</th><th>ReadRequests</th><th>WriteRequests</th><th>StorefileSize</th><th>Num.Storefiles</th><th>MemSize</th><th>Locality</th><th>Start Key</th><th>End Key</th><th>ReplicaID</th></tr></thead>";
     withReplica = true;
   } else {
@@ -365,7 +366,7 @@ if ( fqtn != null ) {
       <th></th>
   </tr>
   <%
-    Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
+    Collection<HColumnDescriptor> families = new HTableDescriptor(table.getDescriptor()).getFamilies();
     for (HColumnDescriptor family: families) {
   %>
   <tr>
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 805613d..58a3f10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -63,7 +63,9 @@ import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -118,6 +120,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -154,6 +157,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.impl.Log4jLoggerAdapter;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 /**
@@ -211,10 +216,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     * HBaseTestingUtility*/
   private Path dataTestDirOnTestFS = null;
 
-  /**
-   * Shared cluster connection.
-   */
-  private volatile Connection connection;
+  private volatile AsyncClusterConnection asyncConnection;
 
   /** Filesystem URI used for map-reduce mini-cluster setup */
   private static String FS_URI;
@@ -1206,9 +1208,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       hbaseAdmin.close();
       hbaseAdmin = null;
     }
-    if (this.connection != null) {
-      this.connection.close();
-      this.connection = null;
+    if (this.asyncConnection != null) {
+      this.asyncConnection.close();
+      this.asyncConnection = null;
     }
     this.hbaseCluster = new MiniHBaseCluster(this.conf, 1, servers, ports, null, null);
     // Don't leave here till we've done a successful scan of the hbase:meta
@@ -1289,14 +1291,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   // close hbase admin, close current connection and reset MIN MAX configs for RS.
   private void cleanup() throws IOException {
-    if (hbaseAdmin != null) {
-      hbaseAdmin.close();
-      hbaseAdmin = null;
-    }
-    if (this.connection != null) {
-      this.connection.close();
-      this.connection = null;
-    }
+    closeConnection();
     // unset the configuration for MIN and MAX RS to start
     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
@@ -3004,17 +2999,35 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     return hbaseCluster;
   }
 
+  private void initConnection() throws IOException {
+    User user = UserProvider.instantiate(conf).getCurrent();
+    this.asyncConnection = ClusterConnectionFactory.createAsyncClusterConnection(conf, null, user);
+  }
+
   /**
-   * Get a Connection to the cluster.
-   * Not thread-safe (This class needs a lot of work to make it thread-safe).
+   * Get a Connection to the cluster. Not thread-safe (This class needs a lot of work to make it
+   * thread-safe).
    * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
-   * @throws IOException
    */
   public Connection getConnection() throws IOException {
-    if (this.connection == null) {
-      this.connection = ConnectionFactory.createConnection(this.conf);
+    if (this.asyncConnection == null) {
+      initConnection();
     }
-    return this.connection;
+    return this.asyncConnection.toConnection();
+  }
+
+  public AsyncClusterConnection getAsyncConnection() throws IOException {
+    if (this.asyncConnection == null) {
+      initConnection();
+    }
+    return this.asyncConnection;
+  }
+
+  public void closeConnection() throws IOException {
+    Closeables.close(hbaseAdmin, true);
+    Closeables.close(asyncConnection, true);
+    this.hbaseAdmin = null;
+    this.asyncConnection = null;
   }
 
   /**
@@ -3186,36 +3199,30 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * Wait until all regions in a table have been assigned
    * @param table Table to wait on.
    * @param timeoutMillis Timeout.
-   * @throws InterruptedException
-   * @throws IOException
    */
   public void waitTableAvailable(byte[] table, long timeoutMillis)
-  throws InterruptedException, IOException {
+      throws InterruptedException, IOException {
     waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
   }
 
   public String explainTableAvailability(TableName tableName) throws IOException {
     String msg = explainTableState(tableName, TableState.State.ENABLED) + ", ";
     if (getHBaseCluster().getMaster().isAlive()) {
-      Map<RegionInfo, ServerName> assignments =
-          getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
-              .getRegionAssignments();
+      Map<RegionInfo, ServerName> assignments = getHBaseCluster().getMaster().getAssignmentManager()
+        .getRegionStates().getRegionAssignments();
       final List<Pair<RegionInfo, ServerName>> metaLocations =
-          MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
+        MetaTableAccessor.getTableRegionsAndLocations(asyncConnection.toConnection(), tableName);
       for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
         RegionInfo hri = metaLocation.getFirst();
         ServerName sn = metaLocation.getSecond();
         if (!assignments.containsKey(hri)) {
-          msg += ", region " + hri
-              + " not assigned, but found in meta, it expected to be on " + sn;
+          msg += ", region " + hri + " not assigned, but found in meta, it expected to be on " + sn;
 
         } else if (sn == null) {
-          msg += ",  region " + hri
-              + " assigned,  but has no server in meta";
+          msg += ",  region " + hri + " assigned,  but has no server in meta";
         } else if (!sn.equals(assignments.get(hri))) {
-          msg += ",  region " + hri
-              + " assigned,  but has different servers in meta and AM ( " +
-              sn + " <> " + assignments.get(hri);
+          msg += ",  region " + hri + " assigned,  but has different servers in meta and AM ( " +
+            sn + " <> " + assignments.get(hri);
         }
       }
     }
@@ -3224,10 +3231,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   public String explainTableState(final TableName table, TableState.State state)
       throws IOException {
-    TableState tableState = MetaTableAccessor.getTableState(connection, table);
+    TableState tableState = MetaTableAccessor.getTableState(asyncConnection.toConnection(), table);
     if (tableState == null) {
-      return "TableState in META: No table state in META for table " + table
-          + " last state in meta (including deleted is " + findLastTableState(table) + ")";
+      return "TableState in META: No table state in META for table " + table +
+        " last state in meta (including deleted is " + findLastTableState(table) + ")";
     } else if (!tableState.inStates(state)) {
       return "TableState in META: Not " + state + " state, but " + tableState;
     } else {
@@ -3241,18 +3248,18 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
       @Override
       public boolean visit(Result r) throws IOException {
-        if (!Arrays.equals(r.getRow(), table.getName()))
+        if (!Arrays.equals(r.getRow(), table.getName())) {
           return false;
+        }
         TableState state = MetaTableAccessor.getTableState(r);
-        if (state != null)
+        if (state != null) {
           lastTableState.set(state);
+        }
         return true;
       }
     };
-    MetaTableAccessor
-        .scanMeta(connection, null, null,
-            MetaTableAccessor.QueryType.TABLE,
-            Integer.MAX_VALUE, visitor);
+    MetaTableAccessor.scanMeta(asyncConnection.toConnection(), null, null,
+      MetaTableAccessor.QueryType.TABLE, Integer.MAX_VALUE, visitor);
     return lastTableState.get();
   }
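
Illustrative sketch (not part of this patch): how a test can consume the connection
accessors added above. Both accessors are backed by the single shared
AsyncClusterConnection, and neither view should be closed by the test itself:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Table;

public class SharedTestConnectionSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      AsyncClusterConnection async = util.getAsyncConnection();
      System.out.println("async connection closed? " + async.isClosed());
      // The synchronous view is a facade over the same connection; closing the Table is
      // fine, but the Connection itself must not be closed (it is shared and torn down
      // on cluster shutdown).
      try (Table meta = util.getConnection().getTable(TableName.META_TABLE_NAME)) {
        System.out.println("opened " + meta.getName() + " through the synchronous view");
      }
    } finally {
      util.shutdownMiniCluster(); // also tears down the shared connection via closeConnection()
    }
  }
}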
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
index d1df8f0..fe88d6b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
@@ -29,7 +29,6 @@ import java.util.ArrayList;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
-import org.apache.hadoop.hbase.client.ClientScanner;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -569,7 +568,6 @@ public class TestPartialResultsFromClientSide {
   /**
    * @param resultSizeRowLimit The row limit that will be enforced through maxResultSize
    * @param cachingRowLimit The row limit that will be enforced through caching
-   * @throws Exception
    */
   public void testPartialResultsAndCaching(int resultSizeRowLimit, int cachingRowLimit)
       throws Exception {
@@ -585,19 +583,16 @@ public class TestPartialResultsFromClientSide {
     scan.setMaxResultSize(maxResultSize);
     scan.setCaching(cachingRowLimit);
 
-    ResultScanner scanner = TABLE.getScanner(scan);
-    ClientScanner clientScanner = (ClientScanner) scanner;
-    Result r = null;
-
-    // Approximate the number of rows we expect will fit into the specified max rsult size. If this
-    // approximation is less than caching, then we expect that the max result size limit will be
-    // hit before the caching limit and thus partial results may be seen
-    boolean expectToSeePartialResults = resultSizeRowLimit < cachingRowLimit;
-    while ((r = clientScanner.next()) != null) {
-      assertTrue(!r.mayHaveMoreCellsInRow() || expectToSeePartialResults);
+    try (ResultScanner scanner = TABLE.getScanner(scan)) {
+      Result r = null;
+      // Approximate the number of rows we expect will fit into the specified max result size. If
+      // this approximation is less than caching, then we expect that the max result size limit will
+      // be hit before the caching limit and thus partial results may be seen
+      boolean expectToSeePartialResults = resultSizeRowLimit < cachingRowLimit;
+      while ((r = scanner.next()) != null) {
+        assertTrue(!r.mayHaveMoreCellsInRow() || expectToSeePartialResults);
+      }
     }
-
-    scanner.close();
   }
 
   /**
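
Illustrative sketch (not part of this patch) of the partial-result check the rewritten
test performs, using the same try-with-resources style; the table, max result size and
caching values are placeholders supplied by the caller:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class PartialResultsSketch {
  static void assertNoUnexpectedPartials(Table table, long maxResultSize, int caching,
      boolean expectPartials) throws IOException {
    Scan scan = new Scan();
    scan.setMaxResultSize(maxResultSize); // a small limit forces partial Results per RPC
    scan.setCaching(caching);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r; (r = scanner.next()) != null;) {
        if (r.mayHaveMoreCellsInRow() && !expectPartials) {
          throw new AssertionError("saw a partial Result although none were expected");
        }
      }
    }
  }
}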
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java
index da84f2f..906d458 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java
@@ -93,7 +93,7 @@ public class TestServerSideScanMetricsFromClientSide {
     TABLE = createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE);
   }
 
-  static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
+  private static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
       byte[][] qualifiers, byte[] cellValue) throws IOException {
     Table ht = TEST_UTIL.createTable(name, families);
     List<Put> puts = createPuts(rows, families, qualifiers, cellValue);
@@ -109,14 +109,8 @@ public class TestServerSideScanMetricsFromClientSide {
 
   /**
    * Make puts to put the input value into each combination of row, family, and qualifier
-   * @param rows
-   * @param families
-   * @param qualifiers
-   * @param value
-   * @return
-   * @throws IOException
    */
-  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
+  private static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
       byte[] value) throws IOException {
     Put put;
     ArrayList<Put> puts = new ArrayList<>();
@@ -139,7 +133,6 @@ public class TestServerSideScanMetricsFromClientSide {
    * @return The approximate heap size of a cell in the test table. All cells should have
    *         approximately the same heap size, so the value is cached to avoid repeating the
    *         calculation
-   * @throws Exception
    */
   private long getCellHeapSize() throws Exception {
     if (CELL_HEAP_SIZE == -1) {
@@ -163,21 +156,11 @@ public class TestServerSideScanMetricsFromClientSide {
   }
 
   @Test
-  public void testRowsSeenMetricWithSync() throws Exception {
-    testRowsSeenMetric(false);
-  }
-
-  @Test
-  public void testRowsSeenMetricWithAsync() throws Exception {
-    testRowsSeenMetric(true);
-  }
-
-  private void testRowsSeenMetric(boolean async) throws Exception {
+  public void testRowsSeenMetric() throws Exception {
     // Base scan configuration
     Scan baseScan;
     baseScan = new Scan();
     baseScan.setScanMetricsEnabled(true);
-    baseScan.setAsyncPrefetch(async);
     testRowsSeenMetric(baseScan);
 
     // Test case where only a single result will be returned per RPC to the server
@@ -196,7 +179,7 @@ public class TestServerSideScanMetricsFromClientSide {
     testRowsSeenMetric(baseScan);
   }
 
-  public void testRowsSeenMetric(Scan baseScan) throws Exception {
+  private void testRowsSeenMetric(Scan baseScan) throws Exception {
     Scan scan;
     scan = new Scan(baseScan);
     testMetric(scan, ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME, NUM_ROWS);
@@ -263,7 +246,7 @@ public class TestServerSideScanMetricsFromClientSide {
     testRowsSeenMetric(baseScan);
   }
 
-  public void testRowsFilteredMetric(Scan baseScan) throws Exception {
+  private void testRowsFilteredMetric(Scan baseScan) throws Exception {
     testRowsFilteredMetric(baseScan, null, 0);
 
     // Row filter doesn't match any row key. All rows should be filtered
@@ -315,34 +298,32 @@ public class TestServerSideScanMetricsFromClientSide {
     testRowsFilteredMetric(baseScan, filter, ROWS.length);
   }
 
-  public void testRowsFilteredMetric(Scan baseScan, Filter filter, int expectedNumFiltered)
+  private void testRowsFilteredMetric(Scan baseScan, Filter filter, int expectedNumFiltered)
       throws Exception {
     Scan scan = new Scan(baseScan);
-    if (filter != null) scan.setFilter(filter);
-    testMetric(scan, ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME, expectedNumFiltered);
+    if (filter != null) {
+      scan.setFilter(filter);
+    }
+    testMetric(scan, ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME,
+      expectedNumFiltered);
   }
 
   /**
-   * Run the scan to completetion and check the metric against the specified value
-   * @param scan
-   * @param metricKey
-   * @param expectedValue
-   * @throws Exception
+   * Run the scan to completion and check the metric against the specified value
    */
-  public void testMetric(Scan scan, String metricKey, long expectedValue) throws Exception {
+  private void testMetric(Scan scan, String metricKey, long expectedValue) throws Exception {
     assertTrue("Scan should be configured to record metrics", scan.isScanMetricsEnabled());
     ResultScanner scanner = TABLE.getScanner(scan);
     // Iterate through all the results
     while (scanner.next() != null) {
-
     }
     scanner.close();
     ScanMetrics metrics = scanner.getScanMetrics();
     assertTrue("Metrics are null", metrics != null);
     assertTrue("Metric : " + metricKey + " does not exist", metrics.hasCounter(metricKey));
     final long actualMetricValue = metrics.getCounter(metricKey).get();
-    assertEquals("Metric: " + metricKey + " Expected: " + expectedValue + " Actual: "
-        + actualMetricValue, expectedValue, actualMetricValue);
-
+    assertEquals(
+      "Metric: " + metricKey + " Expected: " + expectedValue + " Actual: " + actualMetricValue,
+      expectedValue, actualMetricValue);
   }
 }
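
Illustrative sketch (not part of this patch): reading server-side scan metrics the way
testMetric() above does, outside the test harness; the table name is supplied by the
caller:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics;

public final class ScanMetricsSketch {
  static long countRowsScanned(Connection conn, TableName tableName) throws IOException {
    Scan scan = new Scan();
    scan.setScanMetricsEnabled(true); // ask the client to collect scan metrics
    try (Table table = conn.getTable(tableName)) {
      ResultScanner scanner = table.getScanner(scan);
      while (scanner.next() != null) {
        // drain the scanner; the counters are only complete once the scan has finished
      }
      scanner.close();
      ScanMetrics metrics = scanner.getScanMetrics();
      return metrics.getCounter(ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME).get();
    }
  }
}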
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 618fe74..c267ba2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.mock;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.DummyAsyncRegistry;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.master.cleaner.CleanerChore;
@@ -80,7 +82,7 @@ public class TestZooKeeperTableArchiveClient {
       HBaseClassTestRule.forClass(TestZooKeeperTableArchiveClient.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestZooKeeperTableArchiveClient.class);
-  private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private static final String STRING_TABLE_NAME = "test";
   private static final byte[] TEST_FAM = Bytes.toBytes("fam");
   private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
@@ -89,6 +91,17 @@ public class TestZooKeeperTableArchiveClient {
   private static Connection CONNECTION;
   private static RegionServerServices rss;
 
+  public static final class MockRegistry extends DummyAsyncRegistry {
+
+    public MockRegistry(Configuration conf) {
+    }
+
+    @Override
+    public CompletableFuture<String> getClusterId() {
+      return CompletableFuture.completedFuture("clusterId");
+    }
+  }
+
   /**
    * Setup the config for the cluster
    */
@@ -96,6 +109,8 @@ public class TestZooKeeperTableArchiveClient {
   public static void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniZKCluster();
+    UTIL.getConfiguration().setClass("hbase.client.registry.impl", MockRegistry.class,
+      DummyAsyncRegistry.class);
     CONNECTION = ConnectionFactory.createConnection(UTIL.getConfiguration());
     archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION);
     // make hfile archiving node so we can archive files
@@ -377,7 +392,7 @@ public class TestZooKeeperTableArchiveClient {
         if (counter[0] >= expected) finished.countDown();
         return ret;
       }
-    }).when(delegateSpy).getDeletableFiles(Mockito.anyListOf(FileStatus.class));
+    }).when(delegateSpy).getDeletableFiles(Mockito.anyList());
     cleaners.set(0, delegateSpy);
 
     return finished;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java
index d109108..d1f0e1a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java
@@ -28,7 +28,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Based class for testing operation timeout logic for {@link ConnectionImplementation}.
+ * Base class for testing operation timeout logic.
  */
 public abstract class AbstractTestCIOperationTimeout extends AbstractTestCITimeout {
 
@@ -73,7 +73,7 @@ public abstract class AbstractTestCIOperationTimeout extends AbstractTestCITimeo
       SleepAndFailFirstTime.ct.set(0);
       execute(table);
       fail("We expect an exception here");
-    } catch (SocketTimeoutException | RetriesExhaustedWithDetailsException e) {
+    } catch (SocketTimeoutException | RetriesExhaustedException e) {
       // The client has a CallTimeout class, but it's not shared. We're not very clean today,
       // in the general case you can expect the call to stop, but the exception may vary.
       // In this test however, we're sure that it will be a socket timeout.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java
index 89696cf..aedb814 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java
@@ -29,7 +29,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Based class for testing rpc timeout logic for {@link ConnectionImplementation}.
+ * Base class for testing rpc timeout logic.
  */
 public abstract class AbstractTestCIRpcTimeout extends AbstractTestCITimeout {
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java
index 49e0f56..33e7fe4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java
@@ -38,7 +38,7 @@ import org.junit.Rule;
 import org.junit.rules.TestName;
 
 /**
- * Based class for testing timeout logic for {@link ConnectionImplementation}.
+ * Base class for testing timeout logic.
  */
 public abstract class AbstractTestCITimeout {
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
index d5fc58e..95afa64 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
@@ -152,4 +152,9 @@ public class DummyAsyncClusterConnection implements AsyncClusterConnection {
   public CompletableFuture<Void> cleanupBulkLoad(TableName tableName, String bulkToken) {
     return null;
   }
+
+  @Override
+  public Connection toConnection() {
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index efdf187..8e9afed 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -71,6 +72,8 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
 
@@ -79,15 +82,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTable
  * Spins up the minicluster once at test start and then takes it down afterward.
  * Add any testing of HBaseAdmin functionality here.
  */
-@Category({LargeTests.class, ClientTests.class})
+@Category({ LargeTests.class, ClientTests.class })
 public class TestAdmin1 {
 
   @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAdmin1.class);
+  public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin1.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestAdmin1.class);
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static ConnectionImplementation CONN;
   private static Admin ADMIN;
 
   @Rule
@@ -101,12 +104,16 @@ public class TestAdmin1 {
     TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
     TEST_UTIL.startMiniCluster(3);
     ADMIN = TEST_UTIL.getAdmin();
+    CONN = ConnectionFactory.createConnectionImpl(TEST_UTIL.getConfiguration(), null,
+      UserProvider.instantiate(TEST_UTIL.getConfiguration()).getCurrent());
   }
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
+    Closeables.close(CONN, true);
     TEST_UTIL.shutdownMiniCluster();
   }
+
   @After
   public void tearDown() throws Exception {
     for (TableDescriptor htd : ADMIN.listTableDescriptors()) {
@@ -702,7 +709,7 @@ public class TestAdmin1 {
       ADMIN.createTable(desc, Bytes.toBytes("a"), Bytes.toBytes("z"), 2);
       fail("Should not be able to create a table with only 2 regions using this API.");
     } catch (IllegalArgumentException eae) {
-    // Expected
+      // Expected
     }
 
     TableName TABLE_5 = TableName.valueOf(tableName.getNameAsString() + "_5");
@@ -741,7 +748,6 @@ public class TestAdmin1 {
     List<HRegionLocation> regions;
     Iterator<HRegionLocation> hris;
     RegionInfo hri;
-    ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection();
     try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
       regions = l.getAllRegionLocations();
 
@@ -781,7 +787,7 @@ public class TestAdmin1 {
       assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[8]));
       assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);
 
-      verifyRoundRobinDistribution(conn, l, expectedRegions);
+      verifyRoundRobinDistribution(CONN, l, expectedRegions);
     }
 
 
@@ -841,7 +847,7 @@ public class TestAdmin1 {
       assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }));
       assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);
 
-      verifyRoundRobinDistribution(conn, l, expectedRegions);
+      verifyRoundRobinDistribution(CONN, l, expectedRegions);
     }
 
     // Try once more with something that divides into something infinite
@@ -864,7 +870,7 @@ public class TestAdmin1 {
           "but only found " + regions.size(), expectedRegions, regions.size());
       System.err.println("Found " + regions.size() + " regions");
 
-      verifyRoundRobinDistribution(conn, l, expectedRegions);
+      verifyRoundRobinDistribution(CONN, l, expectedRegions);
     }
 
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 6852718..58a8bc5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -72,6 +73,8 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 /**
@@ -87,7 +90,8 @@ public class TestAdmin2 {
       HBaseClassTestRule.forClass(TestAdmin2.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestAdmin2.class);
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static ConnectionImplementation CONN;
   private static Admin ADMIN;
 
   @Rule
@@ -102,11 +106,14 @@ public class TestAdmin2 {
     TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 30);
     TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
     TEST_UTIL.startMiniCluster(3);
+    CONN = ConnectionFactory.createConnectionImpl(TEST_UTIL.getConfiguration(), null,
+      UserProvider.instantiate(TEST_UTIL.getConfiguration()).getCurrent());
     ADMIN = TEST_UTIL.getAdmin();
   }
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
+    Closeables.close(CONN, true);
     TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -282,7 +289,7 @@ public class TestAdmin2 {
   /**
    * Can't enable a table if the table isn't in disabled state
    */
-  @Test (expected=TableNotDisabledException.class)
+  @Test(expected = TableNotDisabledException.class)
   public void testTableNotDisabledExceptionWithATable() throws IOException {
     final TableName name = TableName.valueOf(this.name.getMethodName());
     try (Table t = TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY)) {
@@ -768,12 +775,10 @@ public class TestAdmin2 {
     long expectedStoreFilesSize = store.getStorefilesSize();
     Assert.assertNotNull(store);
     Assert.assertEquals(expectedStoreFilesSize, store.getSize());
-
-    ConnectionImplementation conn = (ConnectionImplementation) ADMIN.getConnection();
-    HBaseRpcController controller = conn.getRpcControllerFactory().newController();
+    HBaseRpcController controller = CONN.getRpcControllerFactory().newController();
     for (int i = 0; i < 10; i++) {
       RegionInfo ri =
-          ProtobufUtil.getRegionInfo(controller, conn.getAdmin(rs.getServerName()), regionName);
+          ProtobufUtil.getRegionInfo(controller, CONN.getAdmin(rs.getServerName()), regionName);
       Assert.assertEquals(region.getRegionInfo(), ri);
 
       // Make sure that the store size is still the actual file system's store size.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java
index 6c9c257..66d3d3d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java
@@ -24,6 +24,8 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.HBaseRpcControllerImpl;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -33,6 +35,7 @@ import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -48,7 +51,7 @@ public class TestAlwaysSetScannerId {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAlwaysSetScannerId.class);
+    HBaseClassTestRule.forClass(TestAlwaysSetScannerId.class);
 
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
@@ -62,7 +65,9 @@ public class TestAlwaysSetScannerId {
 
   private static RegionInfo HRI;
 
-  private static ClientProtos.ClientService.BlockingInterface STUB;
+  private static AsyncConnectionImpl CONN;
+
+  private static ClientProtos.ClientService.Interface STUB;
 
   @BeforeClass
   public static void setUp() throws Exception {
@@ -73,38 +78,46 @@ public class TestAlwaysSetScannerId {
       }
     }
     HRI = UTIL.getAdmin().getRegions(TABLE_NAME).get(0);
-    STUB = ((ConnectionImplementation) UTIL.getConnection())
-        .getClient(UTIL.getHBaseCluster().getRegionServer(0).getServerName());
+    CONN =
+      (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get();
+    STUB = CONN.getRegionServerStub(UTIL.getHBaseCluster().getRegionServer(0).getServerName());
   }
 
   @AfterClass
   public static void tearDown() throws Exception {
+    Closeables.close(CONN, true);
     UTIL.shutdownMiniCluster();
   }
 
+  private ScanResponse scan(ScanRequest req) throws IOException {
+    BlockingRpcCallback<ScanResponse> callback = new BlockingRpcCallback<>();
+    STUB.scan(new HBaseRpcControllerImpl(), req, callback);
+    return callback.get();
+  }
+
   @Test
   public void test() throws ServiceException, IOException {
     Scan scan = new Scan();
     ScanRequest req = RequestConverter.buildScanRequest(HRI.getRegionName(), scan, 1, false);
-    ScanResponse resp = STUB.scan(null, req);
+    ScanResponse resp = scan(req);
     assertTrue(resp.hasScannerId());
     long scannerId = resp.getScannerId();
     int nextCallSeq = 0;
     // test next
     for (int i = 0; i < COUNT / 2; i++) {
       req = RequestConverter.buildScanRequest(scannerId, 1, false, nextCallSeq++, false, false, -1);
-      resp = STUB.scan(null, req);
+      resp = scan(req);
       assertTrue(resp.hasScannerId());
       assertEquals(scannerId, resp.getScannerId());
     }
     // test renew
     req = RequestConverter.buildScanRequest(scannerId, 0, false, nextCallSeq++, false, true, -1);
-    resp = STUB.scan(null, req);
+    resp = scan(req);
     assertTrue(resp.hasScannerId());
     assertEquals(scannerId, resp.getScannerId());
     // test close
     req = RequestConverter.buildScanRequest(scannerId, 0, true, false);
-    resp = STUB.scan(null, req);
+    resp = scan(req);
     assertTrue(resp.hasScannerId());
     assertEquals(scannerId, resp.getScannerId());
   }
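The TestAlwaysSetScannerId change above swaps the blocking region server stub for the async ClientProtos.ClientService.Interface obtained from AsyncConnectionImpl, then makes each call synchronous again through BlockingRpcCallback. A minimal sketch of that wrapping, using only the types already imported in the hunk above (stub and request are assumed to be in scope):

    // mirrors the scan(ScanRequest) helper added above: issue the async call, then block on get()
    BlockingRpcCallback<ScanResponse> done = new BlockingRpcCallback<>();
    stub.scan(new HBaseRpcControllerImpl(), request, done);
    ScanResponse resp = done.get();
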
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 0f08f44..3a2e8f9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -274,7 +274,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
 
   private void verifyRoundRobinDistribution(List<HRegionLocation> regions, int expectedRegions)
       throws IOException {
-    int numRS = ((ConnectionImplementation) TEST_UTIL.getConnection()).getCurrentNrHRS();
+    int numRS = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().size();
 
     Map<ServerName, List<RegionInfo>> server2Regions = new HashMap<>();
     regions.stream().forEach((loc) -> {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
index 31c01c0..c8c8036 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -291,6 +292,12 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
     }
   }
 
+  /**
+   * TODO: it is not clear what this test verifies, and it does not seem to work together with the
+   * async prefetch scanner. Ignore it for now; once HBASE-21879 lands we will have a more natural
+   * way to deal with reference counting...
+   */
+  @Ignore
   @Test
   public void testHBASE16372InReadPath() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIBadHostname.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIBadHostname.java
index 83d4bfa..e046afa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIBadHostname.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIBadHostname.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.junit.AfterClass;
@@ -43,28 +44,29 @@ public class TestCIBadHostname {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestCIBadHostname.class);
 
-  private static HBaseTestingUtility testUtil;
-  private static ConnectionImplementation conn;
+  private static HBaseTestingUtility TEST_UTIL;
+  private static ConnectionImplementation CONN;
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
-    testUtil = HBaseTestingUtility.createLocalHTU();
-    testUtil.startMiniCluster();
-    conn = (ConnectionImplementation) testUtil.getConnection();
+    TEST_UTIL = HBaseTestingUtility.createLocalHTU();
+    TEST_UTIL.startMiniCluster();
+    CONN = ConnectionFactory.createConnectionImpl(TEST_UTIL.getConfiguration(), null,
+      UserProvider.instantiate(TEST_UTIL.getConfiguration()).getCurrent());
   }
 
   @AfterClass
   public static void teardownAfterClass() throws Exception {
-    conn.close();
-    testUtil.shutdownMiniCluster();
+    CONN.close();
+    TEST_UTIL.shutdownMiniCluster();
   }
 
   @Test(expected = UnknownHostException.class)
   public void testGetAdminBadHostname() throws Exception {
     // verify that we can get an instance with the cluster hostname
-    ServerName master = testUtil.getHBaseCluster().getMaster().getServerName();
+    ServerName master = TEST_UTIL.getHBaseCluster().getMaster().getServerName();
     try {
-      conn.getAdmin(master);
+      CONN.getAdmin(master);
     } catch (UnknownHostException uhe) {
       fail("Obtaining admin to the cluster master should have succeeded");
     }
@@ -74,16 +76,16 @@ public class TestCIBadHostname {
     ServerName badHost =
         ServerName.valueOf("unknownhost.invalid:" + HConstants.DEFAULT_MASTER_PORT,
         System.currentTimeMillis());
-    conn.getAdmin(badHost);
+    CONN.getAdmin(badHost);
     fail("Obtaining admin to unresolvable hostname should have failed");
   }
 
   @Test(expected = UnknownHostException.class)
   public void testGetClientBadHostname() throws Exception {
     // verify that we can get an instance with the cluster hostname
-    ServerName rs = testUtil.getHBaseCluster().getRegionServer(0).getServerName();
+    ServerName rs = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
     try {
-      conn.getClient(rs);
+      CONN.getClient(rs);
     } catch (UnknownHostException uhe) {
       fail("Obtaining client to the cluster regionserver should have succeeded");
     }
@@ -93,7 +95,7 @@ public class TestCIBadHostname {
     ServerName badHost =
         ServerName.valueOf("unknownhost.invalid:" + HConstants.DEFAULT_REGIONSERVER_PORT,
         System.currentTimeMillis());
-    conn.getAdmin(badHost);
+    CONN.getAdmin(badHost);
     fail("Obtaining client to unresolvable hostname should have failed");
   }
 }
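Several tests in this patch stop casting TEST_UTIL.getConnection() and instead build their own ConnectionImplementation. A minimal sketch of the setup/teardown pattern, assuming only the createConnectionImpl(Configuration, ExecutorService, User) factory and the Closeables helper used in the hunks above (the null pool argument is taken verbatim from the patch):

    private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
    private static ConnectionImplementation CONN;

    @BeforeClass
    public static void setUp() throws Exception {
      TEST_UTIL.startMiniCluster();
      // build the legacy connection explicitly instead of casting TEST_UTIL.getConnection()
      CONN = ConnectionFactory.createConnectionImpl(TEST_UTIL.getConfiguration(), null,
        UserProvider.instantiate(TEST_UTIL.getConfiguration()).getCurrent());
    }

    @AfterClass
    public static void tearDown() throws Exception {
      // swallow close errors so the mini cluster is still shut down
      Closeables.close(CONN, true);
      TEST_UTIL.shutdownMiniCluster();
    }
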
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
index fd0eb7b..d914912 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
@@ -20,13 +20,13 @@ package org.apache.hadoop.hbase.client;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.net.SocketTimeoutException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.junit.Before;
@@ -81,7 +81,7 @@ public class TestCISleep extends AbstractTestCITimeout {
         // Because 2s + 3s + 2s > 6s
         table.get(new Get(FAM_NAM));
         fail("We expect an exception here");
-      } catch (SocketTimeoutException e) {
+      } catch (RetriesExhaustedException e) {
         LOG.info("We received an exception, as expected ", e);
       }
     }
@@ -93,8 +93,10 @@ public class TestCISleep extends AbstractTestCITimeout {
     long baseTime = 100;
     final TableName tableName = TableName.valueOf(name.getMethodName());
     TEST_UTIL.createTable(tableName, FAM_NAM);
-    ClientServiceCallable<Object> regionServerCallable =
-      new ClientServiceCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
+    try (ConnectionImplementation conn =
+      ConnectionFactory.createConnectionImpl(TEST_UTIL.getConfiguration(), null,
+        UserProvider.instantiate(TEST_UTIL.getConfiguration()).getCurrent())) {
+      ClientServiceCallable<Object> regionServerCallable = new ClientServiceCallable<Object>(conn,
         tableName, FAM_NAM, new RpcControllerFactory(TEST_UTIL.getConfiguration()).newController(),
         HConstants.PRIORITY_UNSET) {
         @Override
@@ -103,42 +105,41 @@ public class TestCISleep extends AbstractTestCITimeout {
         }
       };
 
-    regionServerCallable.prepare(false);
-    for (int i = 0; i < HConstants.RETRY_BACKOFF.length; i++) {
-      pauseTime = regionServerCallable.sleep(baseTime, i);
-      assertTrue(pauseTime >= (baseTime * HConstants.RETRY_BACKOFF[i]));
-      assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f));
-    }
-
-    RegionAdminServiceCallable<Object> regionAdminServiceCallable =
-      new RegionAdminServiceCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
-        new RpcControllerFactory(TEST_UTIL.getConfiguration()), tableName, FAM_NAM) {
-        @Override
-        public Object call(HBaseRpcController controller) throws Exception {
-          return null;
-        }
-      };
-
-    regionAdminServiceCallable.prepare(false);
-    for (int i = 0; i < HConstants.RETRY_BACKOFF.length; i++) {
-      pauseTime = regionAdminServiceCallable.sleep(baseTime, i);
-      assertTrue(pauseTime >= (baseTime * HConstants.RETRY_BACKOFF[i]));
-      assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f));
-    }
+      regionServerCallable.prepare(false);
+      for (int i = 0; i < HConstants.RETRY_BACKOFF.length; i++) {
+        pauseTime = regionServerCallable.sleep(baseTime, i);
+        assertTrue(pauseTime >= (baseTime * HConstants.RETRY_BACKOFF[i]));
+        assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f));
+      }
+      RegionAdminServiceCallable<Object> regionAdminServiceCallable =
+        new RegionAdminServiceCallable<Object>(conn,
+          new RpcControllerFactory(TEST_UTIL.getConfiguration()), tableName, FAM_NAM) {
+          @Override
+          public Object call(HBaseRpcController controller) throws Exception {
+            return null;
+          }
+        };
 
-    try (MasterCallable<Object> masterCallable =
-      new MasterCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
-        new RpcControllerFactory(TEST_UTIL.getConfiguration())) {
-        @Override
-        protected Object rpcCall() throws Exception {
-          return null;
-        }
-      }) {
+      regionAdminServiceCallable.prepare(false);
       for (int i = 0; i < HConstants.RETRY_BACKOFF.length; i++) {
-        pauseTime = masterCallable.sleep(baseTime, i);
+        pauseTime = regionAdminServiceCallable.sleep(baseTime, i);
         assertTrue(pauseTime >= (baseTime * HConstants.RETRY_BACKOFF[i]));
         assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f));
       }
+
+      try (MasterCallable<Object> masterCallable =
+        new MasterCallable<Object>(conn, new RpcControllerFactory(TEST_UTIL.getConfiguration())) {
+          @Override
+          protected Object rpcCall() throws Exception {
+            return null;
+          }
+        }) {
+        for (int i = 0; i < HConstants.RETRY_BACKOFF.length; i++) {
+          pauseTime = masterCallable.sleep(baseTime, i);
+          assertTrue(pauseTime >= (baseTime * HConstants.RETRY_BACKOFF[i]));
+          assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f));
+        }
+      }
     }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
index 15ef065..53353d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -135,18 +137,16 @@ public class TestCheckAndMutate {
       // get row back and assert the values
       getOneRowAndAssertAllButCExist(table);
 
-      //Test that we get a region level exception
+      // Test that we get a region level exception
       try {
         rm = getBogusRowMutations();
         table.checkAndMutate(ROWKEY, FAMILY).qualifier(Bytes.toBytes("A"))
-            .ifEquals(Bytes.toBytes("a")).thenMutate(rm);
+          .ifEquals(Bytes.toBytes("a")).thenMutate(rm);
         fail("Expected NoSuchColumnFamilyException");
-      } catch (RetriesExhaustedWithDetailsException e) {
-        try {
-          throw e.getCause(0);
-        } catch (NoSuchColumnFamilyException e1) {
-          // expected
-        }
+      } catch (NoSuchColumnFamilyException e) {
+        // expected
+      } catch (RetriesExhaustedException e) {
+        assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class));
       }
     }
   }
@@ -168,18 +168,16 @@ public class TestCheckAndMutate {
       // get row back and assert the values
       getOneRowAndAssertAllButCExist(table);
 
-      //Test that we get a region level exception
+      // Test that we get a region level exception
       try {
         rm = getBogusRowMutations();
         table.checkAndMutate(ROWKEY, FAMILY).qualifier(Bytes.toBytes("A"))
-            .ifEquals(Bytes.toBytes("a")).thenMutate(rm);
+          .ifEquals(Bytes.toBytes("a")).thenMutate(rm);
         fail("Expected NoSuchColumnFamilyException");
-      } catch (RetriesExhaustedWithDetailsException e) {
-        try {
-          throw e.getCause(0);
-        } catch (NoSuchColumnFamilyException e1) {
-          // expected
-        }
+      } catch (NoSuchColumnFamilyException e) {
+        // expected
+      } catch (RetriesExhaustedException e) {
+        assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class));
       }
     }
   }
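The same assertion idiom recurs across this patch: a region-level failure may now surface either directly or wrapped in RetriesExhaustedException, so the tests accept both and check the cause. A short sketch of that idiom (assertThat/instanceOf are the hamcrest helpers imported above; doBogusMutation is a hypothetical stand-in for the operation under test):

    try {
      doBogusMutation(table); // hypothetical call that touches a missing column family
      fail("Expected NoSuchColumnFamilyException");
    } catch (NoSuchColumnFamilyException e) {
      // thrown directly by the client
    } catch (RetriesExhaustedException e) {
      // or wrapped once the retries are used up; unwrap and verify the cause
      assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class));
    }
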
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
index e789349..b8994a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
@@ -25,6 +25,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -48,7 +49,9 @@ public class TestClientPushback extends ClientPushbackTestBase {
 
   @Before
   public void setUp() throws IOException {
-    conn = (ConnectionImplementation) ConnectionFactory.createConnection(UTIL.getConfiguration());
+    conn =
+      (ConnectionImplementation) ConnectionFactory.createConnectionImpl(UTIL.getConfiguration(),
+        null, UserProvider.instantiate(UTIL.getConfiguration()).getCurrent());
     mutator = (BufferedMutatorImpl) conn.getBufferedMutator(tableName);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
index 8a4c065..148d99f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
@@ -83,7 +83,10 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
  * This class is for testing HBaseConnectionManager features
+ * <p/>
+ * This class will be removed in the future, so it is ignored.
  */
+@Ignore
 @Category({LargeTests.class})
 public class TestConnectionImplementation {
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 894f8a7..6d27044 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -181,31 +183,23 @@ public class TestFromClientSide {
     // Client will retry because the rpc timeout is smaller than the sleep time of the first rpc call
     c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500);
 
-    Connection connection = ConnectionFactory.createConnection(c);
-    try (Table t = connection.getTable(TableName.valueOf(name.getMethodName()))) {
-      if (t instanceof HTable) {
-        HTable table = (HTable) t;
-        table.setOperationTimeout(3 * 1000);
-
-        try {
-          Append append = new Append(ROW);
-          append.addColumn(HBaseTestingUtility.fam1, QUALIFIER, VALUE);
-          Result result = table.append(append);
-
-          // Verify expected result
-          Cell[] cells = result.rawCells();
-          assertEquals(1, cells.length);
-          assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE);
-
-          // Verify expected result again
-          Result readResult = table.get(new Get(ROW));
-          cells = readResult.rawCells();
-          assertEquals(1, cells.length);
-          assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE);
-        } finally {
-          connection.close();
-        }
-      }
+    try (Connection connection = ConnectionFactory.createConnection(c);
+        Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null)
+          .setOperationTimeout(3 * 1000).build()) {
+      Append append = new Append(ROW);
+      append.addColumn(HBaseTestingUtility.fam1, QUALIFIER, VALUE);
+      Result result = table.append(append);
+
+      // Verify expected result
+      Cell[] cells = result.rawCells();
+      assertEquals(1, cells.length);
+      assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE);
+
+      // Verify expected result again
+      Result readResult = table.get(new Get(ROW));
+      cells = readResult.rawCells();
+      assertEquals(1, cells.length);
+      assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE);
     }
   }
 
@@ -2247,13 +2241,9 @@ public class TestFromClientSide {
   @Test
   public void testBatchOperationsWithErrors() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    try (Table foo = TEST_UTIL.createTable(tableName, new byte[][] {FAMILY}, 10)) {
+    try (Table foo = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10)) {
 
       int NUM_OPS = 100;
-      int FAILED_OPS = 50;
-
-      RetriesExhaustedWithDetailsException expectedException = null;
-      IllegalArgumentException iae = null;
 
       // 1.1 Put with no column families (local validation, runtime exception)
       List<Put> puts = new ArrayList<Put>(NUM_OPS);
@@ -2264,16 +2254,15 @@ public class TestFromClientSide {
 
       try {
         foo.put(puts);
+        fail();
       } catch (IllegalArgumentException e) {
-        iae = e;
+        // expected
+        assertEquals(NUM_OPS, puts.size());
       }
-      assertNotNull(iae);
-      assertEquals(NUM_OPS, puts.size());
 
       // 1.2 Put with invalid column family
-      iae = null;
       puts.clear();
-      for (int i = 0; i != NUM_OPS; i++) {
+      for (int i = 0; i < NUM_OPS; i++) {
         Put put = new Put(Bytes.toBytes(i));
         put.addColumn((i % 2) == 0 ? FAMILY : INVALID_FAMILY, FAMILY, Bytes.toBytes(i));
         puts.add(put);
@@ -2281,47 +2270,46 @@ public class TestFromClientSide {
 
       try {
         foo.put(puts);
-      } catch (RetriesExhaustedWithDetailsException e) {
-        expectedException = e;
+        fail();
+      } catch (RetriesExhaustedException e) {
+        // expected
+        assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class));
       }
-      assertNotNull(expectedException);
-      assertEquals(FAILED_OPS, expectedException.exceptions.size());
-      assertTrue(expectedException.actions.contains(puts.get(1)));
 
       // 2.1 Get non-existent rows
       List<Get> gets = new ArrayList<>(NUM_OPS);
       for (int i = 0; i < NUM_OPS; i++) {
         Get get = new Get(Bytes.toBytes(i));
-        // get.addColumn(FAMILY, FAMILY);
         gets.add(get);
       }
       Result[] getsResult = foo.get(gets);
-
       assertNotNull(getsResult);
       assertEquals(NUM_OPS, getsResult.length);
-      assertNull(getsResult[1].getRow());
+      for (int i = 0; i < NUM_OPS; i++) {
+        Result getResult = getsResult[i];
+        if (i % 2 == 0) {
+          assertFalse(getResult.isEmpty());
+        } else {
+          assertTrue(getResult.isEmpty());
+        }
+      }
 
       // 2.2 Get with invalid column family
       gets.clear();
-      getsResult = null;
-      expectedException = null;
       for (int i = 0; i < NUM_OPS; i++) {
         Get get = new Get(Bytes.toBytes(i));
         get.addColumn((i % 2) == 0 ? FAMILY : INVALID_FAMILY, FAMILY);
         gets.add(get);
       }
       try {
-        getsResult = foo.get(gets);
-      } catch (RetriesExhaustedWithDetailsException e) {
-        expectedException = e;
+        foo.get(gets);
+        fail();
+      } catch (RetriesExhaustedException e) {
+        // expected
+        assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class));
       }
-      assertNull(getsResult);
-      assertNotNull(expectedException);
-      assertEquals(FAILED_OPS, expectedException.exceptions.size());
-      assertTrue(expectedException.actions.contains(gets.get(1)));
 
       // 3.1 Delete with invalid column family
-      expectedException = null;
       List<Delete> deletes = new ArrayList<>(NUM_OPS);
       for (int i = 0; i < NUM_OPS; i++) {
         Delete delete = new Delete(Bytes.toBytes(i));
@@ -2330,14 +2318,24 @@ public class TestFromClientSide {
       }
       try {
         foo.delete(deletes);
-      } catch (RetriesExhaustedWithDetailsException e) {
-        expectedException = e;
+        fail();
+      } catch (RetriesExhaustedException e) {
+        // expected
+        assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class));
       }
-      assertEquals((NUM_OPS - FAILED_OPS), deletes.size());
-      assertNotNull(expectedException);
-      assertEquals(FAILED_OPS, expectedException.exceptions.size());
-      assertTrue(expectedException.actions.contains(deletes.get(1)));
 
+      // all valid rows should have been deleted
+      gets.clear();
+      for (int i = 0; i < NUM_OPS; i++) {
+        Get get = new Get(Bytes.toBytes(i));
+        gets.add(get);
+      }
+      getsResult = foo.get(gets);
+      assertNotNull(getsResult);
+      assertEquals(NUM_OPS, getsResult.length);
+      for (Result getResult : getsResult) {
+        assertTrue(getResult.isEmpty());
+      }
 
       // 3.2 Delete non-existent rows
       deletes.clear();
@@ -2346,61 +2344,9 @@ public class TestFromClientSide {
         deletes.add(delete);
       }
       foo.delete(deletes);
-
-      assertTrue(deletes.isEmpty());
     }
   }
 
-  /*
-   * Baseline "scalability" test.
-   *
-   * Tests one hundred families, one million columns, one million versions
-   */
-  @Ignore @Test
-  public void testMillions() throws Exception {
-
-    // 100 families
-
-    // millions of columns
-
-    // millions of versions
-
-  }
-
-  @Ignore @Test
-  public void testMultipleRegionsAndBatchPuts() throws Exception {
-    // Two family table
-
-    // Insert lots of rows
-
-    // Insert to the same row with batched puts
-
-    // Insert to multiple rows with batched puts
-
-    // Split the table
-
-    // Get row from first region
-
-    // Get row from second region
-
-    // Scan all rows
-
-    // Insert to multiple regions with batched puts
-
-    // Get row from first region
-
-    // Get row from second region
-
-    // Scan all rows
-
-
-  }
-
-  @Ignore @Test
-  public void testMultipleRowMultipleFamily() throws Exception {
-
-  }
-
   //
   // JIRA Testers
   //
@@ -4373,37 +4319,33 @@ public class TestFromClientSide {
       // to be reloaded.
 
       // Test user metadata
-      try (Admin admin = TEST_UTIL.getAdmin()) {
-        // make a modifiable descriptor
-        HTableDescriptor desc = new HTableDescriptor(a.getTableDescriptor());
-        // offline the table
-        admin.disableTable(tableAname);
-        // add a user attribute to HTD
-        desc.setValue(attrName, attrValue);
-        // add a user attribute to HCD
-        for (HColumnDescriptor c : desc.getFamilies()) {
-          c.setValue(attrName, attrValue);
-        }
-        // update metadata for all regions of this table
-        admin.modifyTable(desc);
-        // enable the table
-        admin.enableTable(tableAname);
-      }
+      Admin admin = TEST_UTIL.getAdmin();
+      // make a modifiable descriptor
+      HTableDescriptor desc = new HTableDescriptor(a.getDescriptor());
+      // offline the table
+      admin.disableTable(tableAname);
+      // add a user attribute to HTD
+      desc.setValue(attrName, attrValue);
+      // add a user attribute to HCD
+      for (HColumnDescriptor c : desc.getFamilies()) {
+        c.setValue(attrName, attrValue);
+      }
+      // update metadata for all regions of this table
+      admin.modifyTable(desc);
+      // enable the table
+      admin.enableTable(tableAname);
 
       // Test that attribute changes were applied
-      HTableDescriptor desc = a.getTableDescriptor();
+      desc = new HTableDescriptor(a.getDescriptor());
       assertEquals("wrong table descriptor returned", desc.getTableName(), tableAname);
       // check HTD attribute
       value = desc.getValue(attrName);
       assertFalse("missing HTD attribute value", value == null);
-      assertFalse("HTD attribute value is incorrect",
-              Bytes.compareTo(value, attrValue) != 0);
+      assertFalse("HTD attribute value is incorrect", Bytes.compareTo(value, attrValue) != 0);
       // check HCD attribute
       for (HColumnDescriptor c : desc.getFamilies()) {
         value = c.getValue(attrName);
         assertFalse("missing HCD attribute value", value == null);
-        assertFalse("HCD attribute value is incorrect",
-                Bytes.compareTo(value, attrValue) != 0);
+        assertFalse("HCD attribute value is incorrect", Bytes.compareTo(value, attrValue) != 0);
       }
     }
   }
@@ -4571,9 +4513,7 @@ public class TestFromClientSide {
     LOG.info("Starting testRowMutation");
     final TableName tableName = TableName.valueOf(name.getMethodName());
     try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) {
-      byte[][] QUALIFIERS = new byte[][]{
-              Bytes.toBytes("a"), Bytes.toBytes("b")
-      };
+      byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b") };
       RowMutations arm = new RowMutations(ROW);
       Put p = new Put(ROW);
       p.addColumn(FAMILY, QUALIFIERS[0], VALUE);
@@ -4591,20 +4531,22 @@ public class TestFromClientSide {
       Delete d = new Delete(ROW);
       d.addColumns(FAMILY, QUALIFIERS[0]);
       arm.add(d);
-      // TODO: Trying mutateRow again.  The batch was failing with a one try only.
+      // TODO: Trying mutateRow again. The batch was failing with only one try.
       t.mutateRow(arm);
       r = t.get(g);
       assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1])));
       assertNull(r.getValue(FAMILY, QUALIFIERS[0]));
 
-      //Test that we get a region level exception
+      // Test that we get a region level exception
       try {
         arm = new RowMutations(ROW);
         p = new Put(ROW);
-        p.addColumn(new byte[]{'b', 'o', 'g', 'u', 's'}, QUALIFIERS[0], VALUE);
+        p.addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, QUALIFIERS[0], VALUE);
         arm.add(p);
         t.mutateRow(arm);
         fail("Expected NoSuchColumnFamilyException");
+      } catch (NoSuchColumnFamilyException e) {
+        return;
       } catch (RetriesExhaustedWithDetailsException e) {
         for (Throwable rootCause : e.getCauses()) {
           if (rootCause instanceof NoSuchColumnFamilyException) {
@@ -4724,14 +4666,11 @@ public class TestFromClientSide {
       for (int j = 0; j != resultWithWal.rawCells().length; ++j) {
         Cell cellWithWal = resultWithWal.rawCells()[j];
         Cell cellWithoutWal = resultWithoutWal.rawCells()[j];
-        assertTrue(Bytes.equals(CellUtil.cloneRow(cellWithWal),
-                CellUtil.cloneRow(cellWithoutWal)));
-        assertTrue(Bytes.equals(CellUtil.cloneFamily(cellWithWal),
-                CellUtil.cloneFamily(cellWithoutWal)));
-        assertTrue(Bytes.equals(CellUtil.cloneQualifier(cellWithWal),
-                CellUtil.cloneQualifier(cellWithoutWal)));
-        assertTrue(Bytes.equals(CellUtil.cloneValue(cellWithWal),
-                CellUtil.cloneValue(cellWithoutWal)));
+        assertArrayEquals(CellUtil.cloneRow(cellWithWal), CellUtil.cloneRow(cellWithoutWal));
+        assertArrayEquals(CellUtil.cloneFamily(cellWithWal), CellUtil.cloneFamily(cellWithoutWal));
+        assertArrayEquals(CellUtil.cloneQualifier(cellWithWal),
+          CellUtil.cloneQualifier(cellWithoutWal));
+        assertArrayEquals(CellUtil.cloneValue(cellWithWal), CellUtil.cloneValue(cellWithoutWal));
       }
     }
   }
@@ -6473,6 +6412,8 @@ public class TestFromClientSide {
     }
   }
 
+  // to be removed
+  @Ignore
   @Test
   public void testRegionCache() throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
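Where the old tests cast Table to HTable just to call setOperationTimeout, the hunks above switch to the TableBuilder API. A minimal sketch of the replacement, assuming conf, tableName and ROW stand for the configuration, table name and row key already defined in the test:

    // old: Table t = connection.getTable(tableName); ((HTable) t).setOperationTimeout(3000);
    // new: configure the timeout through the builder, no implementation cast needed
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTableBuilder(tableName, null)
          .setOperationTimeout(3 * 1000).build()) {
      table.get(new Get(ROW));
    }
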
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index f32123d..83becbc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -47,6 +47,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -55,6 +57,8 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.RpcClientFactory;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -77,9 +81,6 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-
 @Category({LargeTests.class, ClientTests.class})
 public class TestFromClientSide3 {
 
@@ -146,24 +147,11 @@ public class TestFromClientSide3 {
     table.put(put);
   }
 
-  private void performMultiplePutAndFlush(HBaseAdmin admin, Table table,
-      byte[] row, byte[] family, int nFlushes, int nPuts)
-  throws Exception {
-
-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(table.getName())) {
-      // connection needed for poll-wait
-      HRegionLocation loc = locator.getRegionLocation(row, true);
-      AdminProtos.AdminService.BlockingInterface server =
-        ((ConnectionImplementation) admin.getConnection()).getAdmin(loc.getServerName());
-      byte[] regName = loc.getRegionInfo().getRegionName();
-
-      for (int i = 0; i < nFlushes; i++) {
-        randomCFPuts(table, row, family, nPuts);
-        List<String> sf = ProtobufUtil.getStoreFiles(server, regName, FAMILY);
-        int sfCount = sf.size();
-
-        admin.flush(table.getName());
-      }
+  private void performMultiplePutAndFlush(Admin admin, Table table, byte[] row, byte[] family,
+      int nFlushes, int nPuts) throws Exception {
+    for (int i = 0; i < nFlushes; i++) {
+      randomCFPuts(table, row, family, nPuts);
+      admin.flush(table.getName());
     }
   }
 
@@ -259,6 +247,16 @@ public class TestFromClientSide3 {
     }
   }
 
+  private int getStoreFileCount(Admin admin, ServerName serverName, RegionInfo region)
+      throws IOException {
+    for (RegionMetrics metrics : admin.getRegionMetrics(serverName, region.getTable())) {
+      if (Bytes.equals(region.getRegionName(), metrics.getRegionName())) {
+        return metrics.getStoreFileCount();
+      }
+    }
+    return 0;
+  }
+
   // override the config settings at the CF level and ensure priority
   @Test
   public void testAdvancedConfigOverride() throws Exception {
@@ -273,104 +271,94 @@ public class TestFromClientSide3 {
      */
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3);
 
-    try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+    try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10)) {
       TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS);
-      try (Admin admin = TEST_UTIL.getAdmin()) {
-        ConnectionImplementation connection = (ConnectionImplementation) TEST_UTIL.getConnection();
-
-        // Create 3 store files.
-        byte[] row = Bytes.toBytes(random.nextInt());
-        performMultiplePutAndFlush((HBaseAdmin) admin, table, row, FAMILY, 3, 100);
-
-        try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-          // Verify we have multiple store files.
-          HRegionLocation loc = locator.getRegionLocation(row, true);
-          byte[] regionName = loc.getRegionInfo().getRegionName();
-          AdminProtos.AdminService.BlockingInterface server =
-                  connection.getAdmin(loc.getServerName());
-          assertTrue(ProtobufUtil.getStoreFiles(server, regionName, FAMILY).size() > 1);
-
-          // Issue a compaction request
-          admin.compact(tableName);
-
-          // poll wait for the compactions to happen
-          for (int i = 0; i < 10 * 1000 / 40; ++i) {
-            // The number of store files after compaction should be lesser.
-            loc = locator.getRegionLocation(row, true);
-            if (!loc.getRegionInfo().isOffline()) {
-              regionName = loc.getRegionInfo().getRegionName();
-              server = connection.getAdmin(loc.getServerName());
-              if (ProtobufUtil.getStoreFiles(server, regionName, FAMILY).size() <= 1) {
-                break;
-              }
-            }
-            Thread.sleep(40);
-          }
-          // verify the compactions took place and that we didn't just time out
-          assertTrue(ProtobufUtil.getStoreFiles(server, regionName, FAMILY).size() <= 1);
+      Admin admin = TEST_UTIL.getAdmin();
 
-          // change the compaction.min config option for this table to 5
-          LOG.info("hbase.hstore.compaction.min should now be 5");
-          HTableDescriptor htd = new HTableDescriptor(table.getTableDescriptor());
-          htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
-          admin.modifyTable(htd);
-          LOG.info("alter status finished");
+      // Create 3 store files.
+      byte[] row = Bytes.toBytes(random.nextInt());
+      performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 100);
 
-          // Create 3 more store files.
-          performMultiplePutAndFlush((HBaseAdmin) admin, table, row, FAMILY, 3, 10);
+      try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
+        // Verify we have multiple store files.
+        HRegionLocation loc = locator.getRegionLocation(row, true);
+        assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) > 1);
 
-          // Issue a compaction request
-          admin.compact(tableName);
+        // Issue a compaction request
+        admin.compact(tableName);
 
-          // This time, the compaction request should not happen
-          Thread.sleep(10 * 1000);
+        // poll wait for the compactions to happen
+        for (int i = 0; i < 10 * 1000 / 40; ++i) {
+          // The number of store files after compaction should be lesser.
           loc = locator.getRegionLocation(row, true);
-          regionName = loc.getRegionInfo().getRegionName();
-          server = connection.getAdmin(loc.getServerName());
-          int sfCount = ProtobufUtil.getStoreFiles(server, regionName, FAMILY).size();
-          assertTrue(sfCount > 1);
-
-          // change an individual CF's config option to 2 & online schema update
-          LOG.info("hbase.hstore.compaction.min should now be 2");
-          HColumnDescriptor hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
-          hcd.setValue("hbase.hstore.compaction.min", String.valueOf(2));
-          htd.modifyFamily(hcd);
-          admin.modifyTable(htd);
-          LOG.info("alter status finished");
-
-          // Issue a compaction request
-          admin.compact(tableName);
-
-          // poll wait for the compactions to happen
-          for (int i = 0; i < 10 * 1000 / 40; ++i) {
-            loc = locator.getRegionLocation(row, true);
-            regionName = loc.getRegionInfo().getRegionName();
-            try {
-              server = connection.getAdmin(loc.getServerName());
-              if (ProtobufUtil.getStoreFiles(server, regionName, FAMILY).size() < sfCount) {
-                break;
-              }
-            } catch (Exception e) {
-              LOG.debug("Waiting for region to come online: " + Bytes.toString(regionName));
+          if (!loc.getRegion().isOffline()) {
+            if (getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) <= 1) {
+              break;
             }
-            Thread.sleep(40);
           }
-
-          // verify the compaction took place and that we didn't just time out
-          assertTrue(ProtobufUtil.getStoreFiles(
-                  server, regionName, FAMILY).size() < sfCount);
-
-          // Finally, ensure that we can remove a custom config value after we made it
-          LOG.info("Removing CF config value");
-          LOG.info("hbase.hstore.compaction.min should now be 5");
-          hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
-          hcd.setValue("hbase.hstore.compaction.min", null);
-          htd.modifyFamily(hcd);
-          admin.modifyTable(htd);
-          LOG.info("alter status finished");
-          assertNull(table.getTableDescriptor().getFamily(FAMILY).getValue(
-                  "hbase.hstore.compaction.min"));
+          Thread.sleep(40);
         }
+        // verify the compactions took place and that we didn't just time out
+        assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) <= 1);
+
+        // change the compaction.min config option for this table to 5
+        LOG.info("hbase.hstore.compaction.min should now be 5");
+        HTableDescriptor htd = new HTableDescriptor(hTable.getDescriptor());
+        htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
+        admin.modifyTable(htd);
+        LOG.info("alter status finished");
+
+        // Create 3 more store files.
+        performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 10);
+
+        // Issue a compaction request
+        admin.compact(tableName);
+
+        // This time, the compaction request should not happen
+        Thread.sleep(10 * 1000);
+        loc = locator.getRegionLocation(row, true);
+        int sfCount = getStoreFileCount(admin, loc.getServerName(), loc.getRegion());
+        assertTrue(sfCount > 1);
+
+        // change an individual CF's config option to 2 & online schema update
+        LOG.info("hbase.hstore.compaction.min should now be 2");
+        HColumnDescriptor hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
+        hcd.setValue("hbase.hstore.compaction.min", String.valueOf(2));
+        htd.modifyFamily(hcd);
+        admin.modifyTable(htd);
+        LOG.info("alter status finished");
+
+        // Issue a compaction request
+        admin.compact(tableName);
+
+        // poll wait for the compactions to happen
+        for (int i = 0; i < 10 * 1000 / 40; ++i) {
+          loc = locator.getRegionLocation(row, true);
+          try {
+            if (getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) < sfCount) {
+              break;
+            }
+          } catch (Exception e) {
+            LOG.debug("Waiting for region to come online: " +
+              Bytes.toStringBinary(loc.getRegion().getRegionName()));
+          }
+          Thread.sleep(40);
+        }
+
+        // verify the compaction took place and that we didn't just time out
+        assertTrue(getStoreFileCount(admin, loc.getServerName(), loc.getRegion()) < sfCount);
+
+        // Finally, ensure that we can remove a custom config value after we made it
+        LOG.info("Removing CF config value");
+        LOG.info("hbase.hstore.compaction.min should now be 5");
+        hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
+        hcd.setValue("hbase.hstore.compaction.min", null);
+        htd.modifyFamily(hcd);
+        admin.modifyTable(htd);
+        LOG.info("alter status finished");
+        assertNull(hTable.getDescriptor().getColumnFamily(FAMILY)
+          .getValue(Bytes.toBytes("hbase.hstore.compaction.min")));
       }
     }
   }
@@ -453,7 +441,7 @@ public class TestFromClientSide3 {
                 new Put(ROW).addColumn(new byte[]{'b', 'o', 'g', 'u', 's'}, QUALIFIERS[0], VALUE)));
         table.batch(Arrays.asList(arm), batchResult);
         fail("Expected RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException");
-      } catch (RetriesExhaustedWithDetailsException e) {
+      } catch (RetriesExhaustedException e) {
         String msg = e.getMessage();
         assertTrue(msg.contains("NoSuchColumnFamilyException"));
       }
@@ -541,7 +529,7 @@ public class TestFromClientSide3 {
       getList.add(get);
       getList.add(get2);
 
-      boolean[] exists = table.existsAll(getList);
+      boolean[] exists = table.exists(getList);
       assertEquals(true, exists[0]);
       assertEquals(true, exists[1]);
 
@@ -593,7 +581,7 @@ public class TestFromClientSide3 {
       gets.add(new Get(Bytes.add(ANOTHERROW, new byte[]{0x00})));
 
       LOG.info("Calling exists");
-      boolean[] results = table.existsAll(gets);
+      boolean[] results = table.exists(gets);
       assertFalse(results[0]);
       assertFalse(results[1]);
       assertTrue(results[2]);
@@ -607,7 +595,7 @@ public class TestFromClientSide3 {
       gets = new ArrayList<>();
       gets.add(new Get(new byte[]{0x00}));
       gets.add(new Get(new byte[]{0x00, 0x00}));
-      results = table.existsAll(gets);
+      results = table.exists(gets);
       assertTrue(results[0]);
       assertFalse(results[1]);
 
@@ -620,7 +608,7 @@ public class TestFromClientSide3 {
       gets.add(new Get(new byte[]{(byte) 0xff}));
       gets.add(new Get(new byte[]{(byte) 0xff, (byte) 0xff}));
       gets.add(new Get(new byte[]{(byte) 0xff, (byte) 0xff, (byte) 0xff}));
-      results = table.existsAll(gets);
+      results = table.exists(gets);
       assertFalse(results[0]);
       assertTrue(results[1]);
       assertFalse(results[2]);
@@ -655,8 +643,10 @@ public class TestFromClientSide3 {
 
   @Test
   public void testConnectionDefaultUsesCodec() throws Exception {
-    ConnectionImplementation con = (ConnectionImplementation) TEST_UTIL.getConnection();
-    assertTrue(con.hasCellBlockSupport());
+    try (
+      RpcClient client = RpcClientFactory.createClient(TEST_UTIL.getConfiguration(), "cluster")) {
+      assertTrue(client.hasCellBlockSupport());
+    }
   }
 
   @Test
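The admin-stub call ProtobufUtil.getStoreFiles is replaced above by counting store files through Admin.getRegionMetrics. A small usage sketch of the new getStoreFileCount helper added in the hunk, assuming an Admin, a RegionLocator and a row key are already in scope:

    HRegionLocation loc = locator.getRegionLocation(row, true);
    // getStoreFileCount scans the region metrics for the matching region name
    int storeFiles = getStoreFileCount(admin, loc.getServerName(), loc.getRegion());
    assertTrue("expected the region to have multiple store files", storeFiles > 1);
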
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java
index 3fb482d..f7f7450 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java
@@ -78,6 +78,7 @@ public class TestFromClientSideScanExcpetion {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
     conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 6000000);
     conf.setClass(HConstants.REGION_IMPL, MyHRegion.class, HRegion.class);
     conf.setBoolean("hbase.client.log.scanner.activity", true);
@@ -223,7 +224,6 @@ public class TestFromClientSideScanExcpetion {
   @Test
   public void testScannerFailsAfterRetriesWhenCoprocessorThrowsIOE()
       throws IOException, InterruptedException {
-    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
     TableName tableName = TableName.valueOf(name.getMethodName());
     reset();
     THROW_ONCE.set(false); // throw exceptions in every retry
@@ -233,11 +233,12 @@ public class TestFromClientSideScanExcpetion {
       inject();
       TEST_UTIL.countRows(t, new Scan().addColumn(FAMILY, FAMILY));
       fail("Should have thrown an exception");
-    } catch (DoNotRetryIOException expected) {
-      assertThat(expected, instanceOf(ScannerResetException.class));
+    } catch (ScannerResetException expected) {
+      // expected
+    } catch (RetriesExhaustedException e) {
       // expected
+      assertThat(e.getCause(), instanceOf(ScannerResetException.class));
     }
     assertTrue(REQ_COUNT.get() >= 3);
   }
-
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java
index 4186594..f2979e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java
@@ -43,7 +43,6 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
 
@@ -115,10 +114,8 @@ public class TestGetProcedureResult {
 
   private GetProcedureResultResponse.State getState(long procId)
       throws MasterNotRunningException, IOException, ServiceException {
-    MasterProtos.MasterService.BlockingInterface master =
-      ((ConnectionImplementation) UTIL.getConnection()).getMaster();
-    GetProcedureResultResponse resp = master.getProcedureResult(null,
-      GetProcedureResultRequest.newBuilder().setProcId(procId).build());
+    GetProcedureResultResponse resp = UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices()
+      .getProcedureResult(null, GetProcedureResultRequest.newBuilder().setProcId(procId).build());
     return resp.getState();
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
index b1aba6a..58b30e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
@@ -112,30 +112,23 @@ public class TestIncrementsFromClientSide {
     // Client will retry because the rpc timeout is smaller than the sleep time of the first rpc call
     c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500);
 
-    Connection connection = ConnectionFactory.createConnection(c);
-    Table t = connection.getTable(TableName.valueOf(name.getMethodName()));
-    if (t instanceof HTable) {
-      HTable table = (HTable) t;
-      table.setOperationTimeout(3 * 1000);
-
-      try {
-        Increment inc = new Increment(ROW);
-        inc.addColumn(TEST_UTIL.fam1, QUALIFIER, 1);
-        Result result = table.increment(inc);
-
-        Cell [] cells = result.rawCells();
-        assertEquals(1, cells.length);
-        assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1);
-
-        // Verify expected result
-        Result readResult = table.get(new Get(ROW));
-        cells = readResult.rawCells();
-        assertEquals(1, cells.length);
-        assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1);
-      } finally {
-        table.close();
-        connection.close();
-      }
+
+    try (Connection connection = ConnectionFactory.createConnection(c);
+        Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null)
+          .setOperationTimeout(3 * 1000).build()) {
+      Increment inc = new Increment(ROW);
+      inc.addColumn(TEST_UTIL.fam1, QUALIFIER, 1);
+      Result result = table.increment(inc);
+
+      Cell[] cells = result.rawCells();
+      assertEquals(1, cells.length);
+      assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1);
+
+      // Verify expected result
+      Result readResult = table.get(new Get(ROW));
+      cells = readResult.rawCells();
+      assertEquals(1, cells.length);
+      assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1);
     }
   }
 
@@ -216,38 +209,36 @@ public class TestIncrementsFromClientSide {
   public void testIncrementInvalidArguments() throws Exception {
     LOG.info("Starting " + this.name.getMethodName());
     final TableName TABLENAME =
-        TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+      TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
     Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
     final byte[] COLUMN = Bytes.toBytes("column");
     try {
       // try null row
       ht.incrementColumnValue(null, FAMILY, COLUMN, 5);
-      fail("Should have thrown IOException");
-    } catch (IOException iox) {
+      fail("Should have thrown NPE/IOE");
+    } catch (NullPointerException | IOException error) {
       // success
     }
     try {
       // try null family
       ht.incrementColumnValue(ROW, null, COLUMN, 5);
-      fail("Should have thrown IOException");
-    } catch (IOException iox) {
+      fail("Should have thrown NPE/IOE");
+    } catch (NullPointerException | IOException error) {
       // success
     }
     // try null row
     try {
-      Increment incNoRow = new Increment((byte [])null);
+      Increment incNoRow = new Increment((byte[]) null);
       incNoRow.addColumn(FAMILY, COLUMN, 5);
-      fail("Should have thrown IllegalArgumentException");
-    } catch (IllegalArgumentException iax) {
-      // success
-    } catch (NullPointerException npe) {
+      fail("Should have thrown IAE/NPE");
+    } catch (IllegalArgumentException | NullPointerException error) {
       // success
     }
     // try null family
     try {
       Increment incNoFamily = new Increment(ROW);
       incNoFamily.addColumn(null, COLUMN, 5);
-      fail("Should have thrown IllegalArgumentException");
+      fail("Should have thrown IAE");
     } catch (IllegalArgumentException iax) {
       // success
     }
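
The TestIncrementsFromClientSide hunk above replaces manual Connection/Table cleanup with try-with-resources and configures the operation timeout through the connection's TableBuilder. Below is a minimal, self-contained sketch of that pattern, not part of the patch; the table name, timeout value, and wrapper class are illustrative assumptions, and a reachable cluster configuration is presumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class TryWithResourcesSketch {
  public static void main(String[] args) throws Exception {
    // Both resources are closed automatically when the block exits, even on exception,
    // which replaces the explicit finally { table.close(); connection.close(); } blocks.
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = connection.getTableBuilder(TableName.valueOf("example"), null)
          .setOperationTimeout(3 * 1000).build()) {
      table.get(new Get(Bytes.toBytes("row")));
    }
  }
}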
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java
deleted file mode 100644
index dfe147d..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Arrays;
-import org.apache.hadoop.hbase.CompatibilityFactory;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ipc.MetricsHBaseServerSource;
-import org.apache.hadoop.hbase.test.MetricsAssertHelper;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Category(LargeTests.class)
-public class TestLeaseRenewal {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestLeaseRenewal.class);
-
-  public MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
-
-  final Logger LOG = LoggerFactory.getLogger(getClass());
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static byte[] FAMILY = Bytes.toBytes("testFamily");
-  private static final byte[] ANOTHERROW = Bytes.toBytes("anotherrow");
-  private final static byte[] COL_QUAL = Bytes.toBytes("f1");
-  private final static byte[] VAL_BYTES = Bytes.toBytes("v1");
-  private final static byte[] ROW_BYTES = Bytes.toBytes("r1");
-  private final static int leaseTimeout =
-      HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD / 4;
-
-  @Rule
-  public TestName name = new TestName();
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
-      leaseTimeout);
-    TEST_UTIL.startMiniCluster();
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    // Nothing to do.
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    for (TableDescriptor htd : TEST_UTIL.getAdmin().listTableDescriptors()) {
-      LOG.info("Tear down, remove table=" + htd.getTableName());
-      TEST_UTIL.deleteTable(htd.getTableName());
-    }
-  }
-
-  @Test
-  public void testLeaseRenewal() throws Exception {
-    Table table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILY);
-    Put p = new Put(ROW_BYTES);
-    p.addColumn(FAMILY, COL_QUAL, VAL_BYTES);
-    table.put(p);
-    p = new Put(ANOTHERROW);
-    p.addColumn(FAMILY, COL_QUAL, VAL_BYTES);
-    table.put(p);
-    Scan s = new Scan();
-    s.setCaching(1);
-    ResultScanner rs = table.getScanner(s);
-    // we haven't open the scanner yet so nothing happens
-    assertFalse(rs.renewLease());
-    assertTrue(Arrays.equals(rs.next().getRow(), ANOTHERROW));
-    // renew the lease a few times, long enough to be sure
-    // the lease would have expired otherwise
-    Thread.sleep(leaseTimeout/2);
-    assertTrue(rs.renewLease());
-    Thread.sleep(leaseTimeout/2);
-    assertTrue(rs.renewLease());
-    Thread.sleep(leaseTimeout/2);
-    assertTrue(rs.renewLease());
-    // make sure we haven't advanced the scanner
-    assertTrue(Arrays.equals(rs.next().getRow(), ROW_BYTES));
-    // renewLease should return false now as we have read all the data already
-    assertFalse(rs.renewLease());
-    // make sure scanner is exhausted now
-    assertNull(rs.next());
-    // renewLease should return false now
-    assertFalse(rs.renewLease());
-    rs.close();
-    table.close();
-    MetricsHBaseServerSource serverSource = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0)
-        .getRpcServer().getMetrics().getMetricsSource();
-    HELPER.assertCounter("exceptions.OutOfOrderScannerNextException", 0, serverSource);
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java
index ef4ca25..ab7d070 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java
@@ -122,12 +122,8 @@ public class TestMalformedCellFromClient {
       try {
         table.batch(batches, results);
         fail("Where is the exception? We put the malformed cells!!!");
-      } catch (RetriesExhaustedWithDetailsException e) {
-        for (Throwable throwable : e.getCauses()) {
-          assertNotNull(throwable);
-        }
-        assertEquals(1, e.getNumExceptions());
-        exceptionByCaught = e.getCause(0);
+      } catch (RetriesExhaustedException e) {
+        exceptionByCaught = e.getCause();
       }
       for (Object obj : results) {
         assertNotNull(obj);
@@ -285,12 +281,14 @@ public class TestMalformedCellFromClient {
     try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
       table.batch(batches, objs);
       fail("Where is the exception? We put the malformed cells!!!");
-    } catch (RetriesExhaustedWithDetailsException e) {
-      assertEquals(2, e.getNumExceptions());
-      for (int i = 0; i != e.getNumExceptions(); ++i) {
-        assertNotNull(e.getCause(i));
-        assertEquals(DoNotRetryIOException.class, e.getCause(i).getClass());
-        assertEquals("fail", Bytes.toString(e.getRow(i).getRow()));
+    } catch (RetriesExhaustedException e) {
+      Throwable error = e.getCause();
+      for (;;) {
+        assertNotNull("Cannot find a DoNotRetryIOException in the cause chain", error);
+        if (error instanceof DoNotRetryIOException) {
+          break;
+        }
+        error = error.getCause();
       }
     } finally {
       assertObjects(objs, batches.size());
@@ -319,12 +317,14 @@ public class TestMalformedCellFromClient {
     try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
       table.batch(batches, objs);
       fail("Where is the exception? We put the malformed cells!!!");
-    } catch (RetriesExhaustedWithDetailsException e) {
-      assertEquals(1, e.getNumExceptions());
-      for (int i = 0; i != e.getNumExceptions(); ++i) {
-        assertNotNull(e.getCause(i));
-        assertTrue(e.getCause(i) instanceof IOException);
-        assertEquals("fail", Bytes.toString(e.getRow(i).getRow()));
+    } catch (RetriesExhaustedException e) {
+      Throwable error = e.getCause();
+      for (;;) {
+        assertNotNull("Cannot find a DoNotRetryIOException in the cause chain", error);
+        if (error instanceof DoNotRetryIOException) {
+          break;
+        }
+        error = error.getCause();
       }
     } finally {
       assertObjects(objs, batches.size());
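
The TestMalformedCellFromClient hunks above now unwrap a RetriesExhaustedException by walking Throwable#getCause until a DoNotRetryIOException turns up. A hedged, standalone sketch of that cause-chain walk follows; the class, the helper name, and the sample exceptions are illustrative, not code from this patch.

import java.io.IOException;

public final class CauseChainSketch {
  // Returns the first throwable in the cause chain (including t itself) of the given
  // type, or null when no such cause exists.
  static <T extends Throwable> T findCause(Throwable t, Class<T> type) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (type.isInstance(cur)) {
        return type.cast(cur);
      }
    }
    return null;
  }

  public static void main(String[] args) {
    Throwable chained = new RuntimeException("retries exhausted",
        new IOException("malformed cell"));
    // Prints "java.io.IOException: malformed cell".
    System.out.println(findCause(chained, IOException.class));
  }
}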
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
index 3870244..bb58930 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -59,6 +60,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
+/**
+ * Will be removed along with ConnectionImplementation soon.
+ */
+@Ignore
 @Category({MediumTests.class, ClientTests.class})
 public class TestMetaCache {
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 1c06990..53b44ee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
@@ -206,7 +207,9 @@ public class TestMetaWithReplicas {
     }
     byte[] row = Bytes.toBytes("test");
     ServerName master = null;
-    try (Connection c = ConnectionFactory.createConnection(conf)) {
+    try (
+      ConnectionImplementation c = ConnectionFactory.createConnectionImpl(util.getConfiguration(),
+        null, UserProvider.instantiate(util.getConfiguration()).getCurrent())) {
       try (Table htable = util.createTable(TABLE, FAMILIES)) {
         util.getAdmin().flush(TableName.META_TABLE_NAME);
         Thread.sleep(
@@ -335,8 +338,10 @@ public class TestMetaWithReplicas {
   public void testShutdownOfReplicaHolder() throws Exception {
     // checks that the when the server holding meta replica is shut down, the meta replica
     // can be recovered
-    try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
-        RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
+    try (
+      Connection conn = ConnectionFactory.createConnectionImpl(TEST_UTIL.getConfiguration(), null,
+        UserProvider.instantiate(TEST_UTIL.getConfiguration()).getCurrent());
+      RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
       HRegionLocation hrl = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true).get(1);
       ServerName oldServer = hrl.getServerName();
       TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java
index 349f052..9db29ce 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -49,7 +50,7 @@ public class TestMultiActionMetricsFromClient {
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniCluster(1);
     TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster();
-    TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME.META_TABLE_NAME);
+    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
     TEST_UTIL.createTable(TABLE_NAME, FAMILY);
   }
 
@@ -62,12 +63,10 @@ public class TestMultiActionMetricsFromClient {
   public void testMultiMetrics() throws Exception {
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
     conf.set(MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY, "true");
-    ConnectionImplementation conn =
-      (ConnectionImplementation) ConnectionFactory.createConnection(conf);
-
-    try {
+    try (ConnectionImplementation conn = ConnectionFactory.createConnectionImpl(conf, null,
+      UserProvider.instantiate(conf).getCurrent())) {
       BufferedMutator mutator = conn.getBufferedMutator(TABLE_NAME);
-      byte[][] keys = {Bytes.toBytes("aaa"), Bytes.toBytes("mmm"), Bytes.toBytes("zzz")};
+      byte[][] keys = { Bytes.toBytes("aaa"), Bytes.toBytes("mmm"), Bytes.toBytes("zzz") };
       for (byte[] key : keys) {
         Put p = new Put(key);
         p.addColumn(FAMILY, QUALIFIER, Bytes.toBytes(10));
@@ -81,8 +80,6 @@ public class TestMultiActionMetricsFromClient {
       assertEquals(1, metrics.multiTracker.reqHist.getCount());
       assertEquals(3, metrics.numActionsPerServerHist.getSnapshot().getMean(), 1e-15);
       assertEquals(1, metrics.numActionsPerServerHist.getCount());
-    } finally {
-      conn.close();
     }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 50c9bd8..73270d4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -18,17 +18,14 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.hbase.Cell;
@@ -37,8 +34,6 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.codec.KeyValueCodec;
@@ -48,6 +43,7 @@ import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -170,38 +166,6 @@ public class TestMultiParallel {
     return keys.toArray(new byte [][] {new byte [] {}});
   }
 
-
-  /**
-   * This is for testing the active number of threads that were used while
-   * doing a batch operation. It inserts one row per region via the batch
-   * operation, and then checks the number of active threads.
-   * <p/>
-   * For HBASE-3553
-   */
-  @Test
-  public void testActiveThreadsCount() throws Exception {
-    UTIL.getConfiguration().setLong("hbase.htable.threads.coresize", slaves + 1);
-    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration())) {
-      ThreadPoolExecutor executor = HTable.getDefaultExecutor(UTIL.getConfiguration());
-      try {
-        try (Table t = connection.getTable(TEST_TABLE, executor)) {
-          List<Put> puts = constructPutRequests(); // creates a Put for every region
-          t.batch(puts, null);
-          HashSet<ServerName> regionservers = new HashSet<>();
-          try (RegionLocator locator = connection.getRegionLocator(TEST_TABLE)) {
-            for (Row r : puts) {
-              HRegionLocation location = locator.getRegionLocation(r.getRow());
-              regionservers.add(location.getServerName());
-            }
-          }
-          assertEquals(regionservers.size(), executor.getLargestPoolSize());
-        }
-      } finally {
-        executor.shutdownNow();
-      }
-    }
-  }
-
   @Test
   public void testBatchWithGet() throws Exception {
     LOG.info("test=testBatchWithGet");
@@ -256,14 +220,12 @@ public class TestMultiParallel {
 
     // row1 and row2 should be in the same region.
 
-    Object [] r = new Object[actions.size()];
+    Object[] r = new Object[actions.size()];
     try {
       table.batch(actions, r);
       fail();
-    } catch (RetriesExhaustedWithDetailsException ex) {
-      LOG.debug(ex.toString(), ex);
-      // good!
-      assertFalse(ex.mayHaveClusterIssues());
+    } catch (RetriesExhaustedException ex) {
+      // expected
     }
     assertEquals(2, r.length);
     assertTrue(r[0] instanceof Throwable);
@@ -434,7 +396,6 @@ public class TestMultiParallel {
       deletes.add(delete);
     }
     table.delete(deletes);
-    Assert.assertTrue(deletes.isEmpty());
 
     // Get to make sure ...
     for (byte[] k : KEYS) {
@@ -522,41 +483,44 @@ public class TestMultiParallel {
   @Test
   public void testNonceCollision() throws Exception {
     LOG.info("test=testNonceCollision");
-    final Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
-    Table table = connection.getTable(TEST_TABLE);
-    Put put = new Put(ONE_ROW);
-    put.addColumn(BYTES_FAMILY, QUALIFIER, Bytes.toBytes(0L));
+    try (
+      ConnectionImplementation connection =
+        ConnectionFactory.createConnectionImpl(UTIL.getConfiguration(), null,
+          UserProvider.instantiate(UTIL.getConfiguration()).getCurrent());
+      Table table = connection.getTable(TEST_TABLE)) {
+      Put put = new Put(ONE_ROW);
+      put.addColumn(BYTES_FAMILY, QUALIFIER, Bytes.toBytes(0L));
 
-    // Replace nonce manager with the one that returns each nonce twice.
-    NonceGenerator cnm = new NonceGenerator() {
+      // Replace nonce manager with the one that returns each nonce twice.
+      NonceGenerator cnm = new NonceGenerator() {
 
-      private final PerClientRandomNonceGenerator delegate = PerClientRandomNonceGenerator.get();
+        private final PerClientRandomNonceGenerator delegate = PerClientRandomNonceGenerator.get();
 
-      private long lastNonce = -1;
+        private long lastNonce = -1;
 
-      @Override
-      public synchronized long newNonce() {
-        long nonce = 0;
-        if (lastNonce == -1) {
-          lastNonce = nonce = delegate.newNonce();
-        } else {
-          nonce = lastNonce;
-          lastNonce = -1L;
+        @Override
+        public synchronized long newNonce() {
+          long nonce = 0;
+          if (lastNonce == -1) {
+            nonce = delegate.newNonce();
+            lastNonce = nonce;
+          } else {
+            nonce = lastNonce;
+            lastNonce = -1L;
+          }
+          return nonce;
         }
-        return nonce;
-      }
 
-      @Override
-      public long getNonceGroup() {
-        return delegate.getNonceGroup();
-      }
-    };
+        @Override
+        public long getNonceGroup() {
+          return delegate.getNonceGroup();
+        }
+      };
 
-    NonceGenerator oldCnm =
-      ConnectionUtils.injectNonceGeneratorForTesting((ConnectionImplementation) connection, cnm);
+      NonceGenerator oldCnm =
+        ConnectionUtils.injectNonceGeneratorForTesting((ConnectionImplementation) connection, cnm);
 
-    // First test sequential requests.
-    try {
+      // First test sequential requests.
       Increment inc = new Increment(ONE_ROW);
       inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
       table.increment(inc);
@@ -613,10 +577,6 @@ public class TestMultiParallel {
       get.addColumn(BYTES_FAMILY, QUALIFIER);
       result = table.get(get);
       validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L));
-      table.close();
-    } finally {
-      ConnectionImplementation.injectNonceGeneratorForTesting((ConnectionImplementation) connection,
-        oldCnm);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java
index bf54449..dec9f65 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static junit.framework.TestCase.assertEquals;
+import static org.junit.Assert.assertEquals;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -28,9 +28,7 @@ import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CompatibilityFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -41,6 +39,8 @@ import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -71,9 +71,10 @@ public class TestMultiRespectsLimits {
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.getConfiguration().setLong(
-        HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY,
-        MAX_SIZE);
+    // disable the debug log to avoid flooding the output
+    LogManager.getLogger(AsyncRegionLocatorHelper.class).setLevel(Level.INFO);
+    TEST_UTIL.getConfiguration().setLong(HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY,
+      MAX_SIZE);
 
     // Only start on regionserver so that all regions are on the same server.
     TEST_UTIL.startMiniCluster(1);
@@ -126,11 +127,9 @@ public class TestMultiRespectsLimits {
   @Test
   public void testBlockMultiLimits() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
-    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
-    desc.addFamily(hcd);
-    TEST_UTIL.getAdmin().createTable(desc);
+    TEST_UTIL.getAdmin().createTable(
+      TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder
+        .newBuilder(FAMILY).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF).build()).build());
     Table t = TEST_UTIL.getConnection().getTable(tableName);
 
     final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java
index b877ad7..44efcbc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java
@@ -33,9 +33,14 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+/**
+ * To be rewritten to check the async meta cache.
+ */
+@Ignore
 @Category({MediumTests.class, ClientTests.class})
 public class TestRegionLocationCaching {
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index d53353e..61d4b86 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -60,6 +60,7 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
@@ -254,9 +255,6 @@ public class TestReplicaWithCluster {
     HTU.getConfiguration().setInt("hbase.client.primaryCallTimeout.get", 1000000);
     HTU.getConfiguration().setInt("hbase.client.primaryCallTimeout.scan", 1000000);
 
-    // Retry less so it can fail faster
-    HTU.getConfiguration().setInt("hbase.client.retries.number", 1);
-
     // Enable meta replica at server side
     HTU.getConfiguration().setInt("hbase.meta.replica.count", 2);
 
@@ -646,6 +644,8 @@ public class TestReplicaWithCluster {
   // This test is to test when hbase.client.metaReplicaCallTimeout.scan is configured, meta table
   // scan will always get the result from primary meta region as long as the result is returned
   // within configured hbase.client.metaReplicaCallTimeout.scan from primary meta region.
+  // To be rewritten, and meta replicas are not stable
+  @Ignore
   @Test
   public void testGetRegionLocationFromPrimaryMetaRegion() throws IOException, InterruptedException {
     HTU.getAdmin().balancerSwitch(false, true);
@@ -679,6 +679,8 @@ public class TestReplicaWithCluster {
   // are down, hbase client is able to access user replica regions and return stale data.
   // Meta replica is enabled to show the case that the meta replica region could be out of sync
   // with the primary meta region.
+  // To be rewritten, and meta replicas are not stable
+  @Ignore
   @Test
   public void testReplicaGetWithPrimaryAndMetaDown() throws IOException, InterruptedException {
     HTU.getAdmin().balancerSwitch(false, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index c8a7ca1..befe28b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
@@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -86,8 +88,9 @@ public class TestReplicasClient {
   private static final Logger LOG = LoggerFactory.getLogger(TestReplicasClient.class);
 
   private static final int NB_SERVERS = 1;
-  private static Table table = null;
-  private static final byte[] row = Bytes.toBytes(TestReplicasClient.class.getName());
+  private static TableName TABLE_NAME;
+  private Table table = null;
+  private static final byte[] row = Bytes.toBytes(TestReplicasClient.class.getName());
 
   private static HRegionInfo hriPrimary;
   private static HRegionInfo hriSecondary;
@@ -202,8 +205,8 @@ public class TestReplicasClient {
     // Create table then get the single region for our new table.
     HTableDescriptor hdt = HTU.createTableDescriptor(TestReplicasClient.class.getSimpleName());
     hdt.addCoprocessor(SlowMeCopro.class.getName());
-    table = HTU.createTable(hdt, new byte[][]{f}, null);
-
+    HTU.createTable(hdt, new byte[][]{f}, null);
+    TABLE_NAME = hdt.getTableName();
     try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) {
       hriPrimary = locator.getRegionLocation(row, false).getRegionInfo();
     }
@@ -223,7 +226,6 @@ public class TestReplicasClient {
   @AfterClass
   public static void afterClass() throws Exception {
     HRegionServer.TEST_SKIP_REPORTING_TRANSITION = false;
-    if (table != null) table.close();
     HTU.shutdownMiniCluster();
   }
 
@@ -238,6 +240,7 @@ public class TestReplicasClient {
       openRegion(hriSecondary);
     } catch (Exception ignored) {
     }
+    table = HTU.getConnection().getTable(TABLE_NAME);
   }
 
   @After
@@ -328,9 +331,10 @@ public class TestReplicasClient {
   public void testLocations() throws Exception {
     byte[] b1 = Bytes.toBytes("testLocations");
     openRegion(hriSecondary);
-    ConnectionImplementation hc = (ConnectionImplementation) HTU.getConnection();
 
-    try {
+    try (
+        ConnectionImplementation hc = ConnectionFactory.createConnectionImpl(HTU.getConfiguration(),
+          null, UserProvider.instantiate(HTU.getConfiguration()).getCurrent())) {
       hc.clearRegionLocationCache();
       RegionLocations rl = hc.locateRegion(table.getName(), b1, false, false);
       Assert.assertEquals(2, rl.size());
@@ -551,6 +555,10 @@ public class TestReplicasClient {
     }
   }
 
+  /**
+   * To be rewritten without ConnectionImplementation
+   */
+  @Ignore
   @Test
   public void testHedgedRead() throws Exception {
     byte[] b1 = Bytes.toBytes("testHedgedRead");
@@ -690,24 +698,40 @@ public class TestReplicasClient {
     }
   }
 
+  /**
+   * To be rewritten
+   */
+  @Ignore
   @Test
   public void testScanWithReplicas() throws Exception {
     //simple scan
     runMultipleScansOfOneType(false, false);
   }
 
+  /**
+   * To be rewritten
+   */
+  @Ignore
   @Test
   public void testSmallScanWithReplicas() throws Exception {
     //small scan
     runMultipleScansOfOneType(false, true);
   }
 
+  /**
+   * To be rewritten
+   */
+  @Ignore
   @Test
   public void testReverseScanWithReplicas() throws Exception {
     //reverse scan
     runMultipleScansOfOneType(true, false);
   }
 
+  /**
+   * To be rewritten
+   */
+  @Ignore
   @Test
   public void testCancelOfScan() throws Exception {
     openRegion(hriSecondary);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java
index b3295ac..858fbb7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.HBaseRpcControllerImpl;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -66,7 +67,9 @@ public class TestScanWithoutFetchingData {
 
   private static RegionInfo HRI;
 
-  private static ClientProtos.ClientService.BlockingInterface STUB;
+  private static AsyncConnectionImpl CONN;
+
+  private static ClientProtos.ClientService.Interface STUB;
 
   @BeforeClass
   public static void setUp() throws Exception {
@@ -77,8 +80,9 @@ public class TestScanWithoutFetchingData {
       }
     }
     HRI = UTIL.getAdmin().getRegions(TABLE_NAME).get(0);
-    STUB = ((ConnectionImplementation) UTIL.getConnection())
-        .getClient(UTIL.getHBaseCluster().getRegionServer(0).getServerName());
+    CONN =
+      (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get();
+    STUB = CONN.getRegionServerStub(UTIL.getHBaseCluster().getRegionServer(0).getServerName());
   }
 
   @AfterClass
@@ -86,6 +90,12 @@ public class TestScanWithoutFetchingData {
     UTIL.shutdownMiniCluster();
   }
 
+  private ScanResponse scan(HBaseRpcController hrc, ScanRequest req) throws IOException {
+    BlockingRpcCallback<ScanResponse> callback = new BlockingRpcCallback<>();
+    STUB.scan(hrc, req, callback);
+    return callback.get();
+  }
+
   private void assertResult(int row, Result result) {
     assertEquals(row, Bytes.toInt(result.getRow()));
     assertEquals(row, Bytes.toInt(result.getValue(CF, CQ)));
@@ -96,7 +106,7 @@ public class TestScanWithoutFetchingData {
     Scan scan = new Scan();
     ScanRequest req = RequestConverter.buildScanRequest(HRI.getRegionName(), scan, 0, false);
     HBaseRpcController hrc = new HBaseRpcControllerImpl();
-    ScanResponse resp = STUB.scan(hrc, req);
+    ScanResponse resp = scan(hrc, req);
     assertTrue(resp.getMoreResults());
     assertTrue(resp.getMoreResultsInRegion());
     assertEquals(0, ResponseConverter.getResults(hrc.cellScanner(), resp).length);
@@ -106,7 +116,7 @@ public class TestScanWithoutFetchingData {
     for (int i = 0; i < COUNT / 2; i++) {
       req = RequestConverter.buildScanRequest(scannerId, 1, false, nextCallSeq++, false, false, -1);
       hrc.reset();
-      resp = STUB.scan(hrc, req);
+      resp = scan(hrc, req);
       assertTrue(resp.getMoreResults());
       assertTrue(resp.getMoreResultsInRegion());
       Result[] results = ResponseConverter.getResults(hrc.cellScanner(), resp);
@@ -116,14 +126,14 @@ public class TestScanWithoutFetchingData {
     // test zero next
     req = RequestConverter.buildScanRequest(scannerId, 0, false, nextCallSeq++, false, false, -1);
     hrc.reset();
-    resp = STUB.scan(hrc, req);
+    resp = scan(hrc, req);
     assertTrue(resp.getMoreResults());
     assertTrue(resp.getMoreResultsInRegion());
     assertEquals(0, ResponseConverter.getResults(hrc.cellScanner(), resp).length);
     for (int i = COUNT / 2; i < COUNT; i++) {
       req = RequestConverter.buildScanRequest(scannerId, 1, false, nextCallSeq++, false, false, -1);
       hrc.reset();
-      resp = STUB.scan(hrc, req);
+      resp = scan(hrc, req);
       assertTrue(resp.getMoreResults());
       assertEquals(i != COUNT - 1, resp.getMoreResultsInRegion());
       Result[] results = ResponseConverter.getResults(hrc.cellScanner(), resp);
@@ -132,6 +142,7 @@ public class TestScanWithoutFetchingData {
     }
     // close
     req = RequestConverter.buildScanRequest(scannerId, 0, true, false);
-    resp = STUB.scan(null, req);
+    hrc.reset();
+    resp = scan(hrc, req);
   }
 }
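
The TestScanWithoutFetchingData hunk above routes scan RPCs through a BlockingRpcCallback so the asynchronous ClientService stub can be driven synchronously. The plain-Java sketch below illustrates that callback-to-blocking adapter pattern in general terms; the class and method names are illustrative assumptions and this is not HBase's BlockingRpcCallback implementation.

import java.util.concurrent.CountDownLatch;
import java.util.function.Consumer;

public final class BlockingCallbackSketch {
  // Collects a single asynchronous result and lets the caller block until it arrives.
  static final class BlockingCallback<T> implements Consumer<T> {
    private final CountDownLatch done = new CountDownLatch(1);
    private volatile T result;

    @Override
    public void accept(T value) {
      result = value;
      done.countDown();
    }

    T get() throws InterruptedException {
      done.await();
      return result;
    }
  }

  public static void main(String[] args) throws Exception {
    BlockingCallback<String> callback = new BlockingCallback<>();
    // Simulate an asynchronous API completing the callback on another thread.
    new Thread(() -> callback.accept("scan response")).start();
    System.out.println(callback.get()); // blocks until the callback fires
  }
}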
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index af02482..a1a5136 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -20,20 +20,19 @@ package org.apache.hadoop.hbase.client;
 import static org.apache.hadoop.hbase.HConstants.RPC_CODEC_CONF_KEY;
 import static org.apache.hadoop.hbase.client.TestFromClientSide3.generateHugeValue;
 import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS;
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Consumer;
-import java.util.stream.IntStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CompareOperator;
@@ -90,9 +89,6 @@ public class TestScannersFromClientSide {
   @Rule
   public TestName name = new TestName();
 
-  /**
-   * @throws java.lang.Exception
-   */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
@@ -100,17 +96,11 @@ public class TestScannersFromClientSide {
     TEST_UTIL.startMiniCluster(3);
   }
 
-  /**
-   * @throws java.lang.Exception
-   */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
 
-  /**
-   * @throws java.lang.Exception
-   */
   @Before
   public void setUp() throws Exception {
     // Nothing to do.
@@ -126,8 +116,6 @@ public class TestScannersFromClientSide {
 
   /**
    * Test from client side for batch of scan
-   *
-   * @throws Exception
    */
   @Test
   public void testScanBatch() throws Exception {
@@ -236,17 +224,15 @@ public class TestScannersFromClientSide {
     // Create a scan with the default configuration.
     Scan scan = new Scan();
 
-    ResultScanner scanner = ht.getScanner(scan);
-    assertTrue(scanner instanceof ClientScanner);
-    ClientScanner clientScanner = (ClientScanner) scanner;
-
-    // Call next to issue a single RPC to the server
-    scanner.next();
-
-    // The scanner should have, at most, a single result in its cache. If there more results exists
-    // in the cache it means that more than the expected max result size was fetched.
-    assertTrue("The cache contains: " + clientScanner.getCacheSize() + " results",
-      clientScanner.getCacheSize() <= 1);
+    try (ResultScanner scanner = ht.getScanner(scan)) {
+      assertThat(scanner, instanceOf(AsyncTableResultScanner.class));
+      scanner.next();
+      AsyncTableResultScanner s = (AsyncTableResultScanner) scanner;
+      // The scanner should have, at most, a single result in its cache. If more results exist
+      // in the cache, it means that more than the expected max result size was fetched.
+      assertTrue("The cache contains: " + s.getCacheSize() + " results", s.getCacheSize() <= 1);
+    }
   }
 
   /**
@@ -304,11 +290,6 @@ public class TestScannersFromClientSide {
 
   /**
    * Run through a variety of test configurations with a small scan
-   * @param table
-   * @param reversed
-   * @param rows
-   * @param columns
-   * @throws Exception
    */
   private void testSmallScan(Table table, boolean reversed, int rows, int columns) throws Exception {
     Scan baseScan = new Scan();
@@ -349,8 +330,6 @@ public class TestScannersFromClientSide {
 
   /**
    * Test from client side for get with maxResultPerCF set
-   *
-   * @throws Exception
    */
   @Test
   public void testGetMaxResults() throws Exception {
@@ -469,8 +448,6 @@ public class TestScannersFromClientSide {
 
   /**
    * Test from client side for scan with maxResultPerCF set
-   *
-   * @throws Exception
    */
   @Test
   public void testScanMaxResults() throws Exception {
@@ -519,8 +496,6 @@ public class TestScannersFromClientSide {
 
   /**
    * Test from client side for get with rowOffset
-   *
-   * @throws Exception
    */
   @Test
   public void testGetRowOffset() throws Exception {
@@ -639,8 +614,6 @@ public class TestScannersFromClientSide {
   /**
    * Test from client side for scan while the region is reopened
    * on the same region server.
-   *
-   * @throws Exception
    */
   @Test
   public void testScanOnReopenedRegion() throws Exception {
@@ -713,125 +686,6 @@ public class TestScannersFromClientSide {
     verifyResult(result, kvListExp, toLog, "Testing scan on re-opened region");
   }
 
-  @Test
-  public void testAsyncScannerWithSmallData() throws Exception {
-    testAsyncScanner(TableName.valueOf(name.getMethodName()),
-      2,
-      3,
-      10,
-      -1,
-      null);
-  }
-
-  @Test
-  public void testAsyncScannerWithManyRows() throws Exception {
-    testAsyncScanner(TableName.valueOf(name.getMethodName()),
-      30000,
-      1,
-      1,
-      -1,
-      null);
-  }
-
-  @Test
-  public void testAsyncScannerWithoutCaching() throws Exception {
-    testAsyncScanner(TableName.valueOf(name.getMethodName()),
-      5,
-      1,
-      1,
-      1,
-      (b) -> {
-        try {
-          TimeUnit.MILLISECONDS.sleep(500);
-        } catch (InterruptedException ex) {
-        }
-      });
-  }
-
-  private void testAsyncScanner(TableName table, int rowNumber, int familyNumber,
-      int qualifierNumber, int caching, Consumer<Boolean> listener) throws Exception {
-    assert rowNumber > 0;
-    assert familyNumber > 0;
-    assert qualifierNumber > 0;
-    byte[] row = Bytes.toBytes("r");
-    byte[] family = Bytes.toBytes("f");
-    byte[] qualifier = Bytes.toBytes("q");
-    byte[][] rows = makeNAsciiWithZeroPrefix(row, rowNumber);
-    byte[][] families = makeNAsciiWithZeroPrefix(family, familyNumber);
-    byte[][] qualifiers = makeNAsciiWithZeroPrefix(qualifier, qualifierNumber);
-
-    Table ht = TEST_UTIL.createTable(table, families);
-
-    boolean toLog = true;
-    List<Cell> kvListExp = new ArrayList<>();
-
-    List<Put> puts = new ArrayList<>();
-    for (byte[] r : rows) {
-      Put put = new Put(r);
-      for (byte[] f : families) {
-        for (byte[] q : qualifiers) {
-          KeyValue kv = new KeyValue(r, f, q, 1, VALUE);
-          put.add(kv);
-          kvListExp.add(kv);
-        }
-      }
-      puts.add(put);
-      if (puts.size() > 1000) {
-        ht.put(puts);
-        puts.clear();
-      }
-    }
-    if (!puts.isEmpty()) {
-      ht.put(puts);
-      puts.clear();
-    }
-
-    Scan scan = new Scan();
-    scan.setAsyncPrefetch(true);
-    if (caching > 0) {
-      scan.setCaching(caching);
-    }
-    try (ResultScanner scanner = ht.getScanner(scan)) {
-      assertTrue("Not instance of async scanner",scanner instanceof ClientAsyncPrefetchScanner);
-      ((ClientAsyncPrefetchScanner) scanner).setPrefetchListener(listener);
-      List<Cell> kvListScan = new ArrayList<>();
-      Result result;
-      boolean first = true;
-      int actualRows = 0;
-      while ((result = scanner.next()) != null) {
-        ++actualRows;
-        // waiting for cache. see HBASE-17376
-        if (first) {
-          TimeUnit.SECONDS.sleep(1);
-          first = false;
-        }
-        for (Cell kv : result.listCells()) {
-          kvListScan.add(kv);
-        }
-      }
-      assertEquals(rowNumber, actualRows);
-      // These cells may have different rows but it is ok. The Result#getRow
-      // isn't used in the verifyResult()
-      result = Result.create(kvListScan);
-      verifyResult(result, kvListExp, toLog, "Testing async scan");
-    }
-
-    TEST_UTIL.deleteTable(table);
-  }
-
-  private static byte[][] makeNAsciiWithZeroPrefix(byte[] base, int n) {
-    int maxLength = Integer.toString(n).length();
-    byte [][] ret = new byte[n][];
-    for (int i = 0; i < n; i++) {
-      int length = Integer.toString(i).length();
-      StringBuilder buf = new StringBuilder(Integer.toString(i));
-      IntStream.range(0, maxLength - length).forEach(v -> buf.insert(0, "0"));
-      byte[] tail = Bytes.toBytes(buf.toString());
-      ret[i] = Bytes.add(base, tail);
-    }
-    return ret;
-  }
-
   static void verifyResult(Result result, List<Cell> expKvList, boolean toLog,
       String msg) {
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
index d7f0c87..18b4d2f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -44,7 +45,7 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Category(MediumTests.class)
+@Category({ ClientTests.class, MediumTests.class })
 public class TestSeparateClientZKCluster {
   private static final Logger LOG = LoggerFactory.getLogger(TestSeparateClientZKCluster.class);
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -98,13 +99,11 @@ public class TestSeparateClientZKCluster {
     TableName tn = TableName.valueOf(name.getMethodName());
     // create table
     Connection conn = TEST_UTIL.getConnection();
-    Admin admin = conn.getAdmin();
-    HTable table = (HTable) conn.getTable(tn);
-    try {
+    try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) {
       ColumnFamilyDescriptorBuilder cfDescBuilder =
-          ColumnFamilyDescriptorBuilder.newBuilder(family);
+        ColumnFamilyDescriptorBuilder.newBuilder(family);
       TableDescriptorBuilder tableDescBuilder =
-          TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build());
+        TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build());
       admin.createTable(tableDescBuilder.build());
       // test simple get and put
       Put put = new Put(row);
@@ -114,9 +113,6 @@ public class TestSeparateClientZKCluster {
       Result result = table.get(get);
       LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier)));
       Assert.assertArrayEquals(value, result.getValue(family, qualifier));
-    } finally {
-      admin.close();
-      table.close();
     }
   }
 
@@ -124,9 +120,8 @@ public class TestSeparateClientZKCluster {
   public void testMasterSwitch() throws Exception {
     // get an admin instance and issue some request first
     Connection conn = TEST_UTIL.getConnection();
-    Admin admin = conn.getAdmin();
-    LOG.debug("Tables: " + admin.listTableDescriptors());
-    try {
+    try (Admin admin = conn.getAdmin()) {
+      LOG.debug("Tables: " + admin.listTableDescriptors());
       MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
       // switch active master
       HMaster master = cluster.getMaster();
@@ -139,8 +134,6 @@ public class TestSeparateClientZKCluster {
       }
       // confirm client access still works
       Assert.assertTrue(admin.balance(false));
-    } finally {
-      admin.close();
     }
   }
 
@@ -149,14 +142,14 @@ public class TestSeparateClientZKCluster {
     TableName tn = TableName.valueOf(name.getMethodName());
     // create table
     Connection conn = TEST_UTIL.getConnection();
-    Admin admin = conn.getAdmin();
-    HTable table = (HTable) conn.getTable(tn);
-    try {
+    try (Admin admin = conn.getAdmin();
+        Table table = conn.getTable(tn);
+        RegionLocator locator = conn.getRegionLocator(tn)) {
       MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
       ColumnFamilyDescriptorBuilder cfDescBuilder =
-          ColumnFamilyDescriptorBuilder.newBuilder(family);
+        ColumnFamilyDescriptorBuilder.newBuilder(family);
       TableDescriptorBuilder tableDescBuilder =
-          TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build());
+        TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build());
       admin.createTable(tableDescBuilder.build());
       // issue some requests to cache the region location
       Put put = new Put(row);
@@ -176,8 +169,7 @@ public class TestSeparateClientZKCluster {
       admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), destServerName);
       LOG.debug("Finished moving meta");
       // invalidate client cache
-      RegionInfo region =
-          table.getRegionLocator().getRegionLocation(row).getRegion();
+      RegionInfo region = locator.getRegionLocation(row).getRegion();
       ServerName currentServer = cluster.getServerHoldingRegion(tn, region.getRegionName());
       for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
         ServerName name = rst.getRegionServer().getServerName();
@@ -194,9 +186,6 @@ public class TestSeparateClientZKCluster {
       result = table.get(get);
       LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier)));
       Assert.assertArrayEquals(newVal, result.getValue(family, qualifier));
-    } finally {
-      admin.close();
-      table.close();
     }
   }
 
@@ -205,13 +194,11 @@ public class TestSeparateClientZKCluster {
     TableName tn = TableName.valueOf(name.getMethodName());
     // create table
     Connection conn = TEST_UTIL.getConnection();
-    Admin admin = conn.getAdmin();
-    Table table = conn.getTable(tn);
-    try {
+    try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) {
       ColumnFamilyDescriptorBuilder cfDescBuilder =
-          ColumnFamilyDescriptorBuilder.newBuilder(family);
+        ColumnFamilyDescriptorBuilder.newBuilder(family);
       TableDescriptorBuilder tableDescBuilder =
-          TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build());
+        TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build());
       admin.createTable(tableDescBuilder.build());
       // put some data
       Put put = new Put(row);
@@ -241,9 +228,6 @@ public class TestSeparateClientZKCluster {
       Result result = table.get(get);
       LOG.debug("Result: " + Bytes.toString(result.getValue(family, qualifier)));
       Assert.assertArrayEquals(value, result.getValue(family, qualifier));
-    } finally {
-      admin.close();
-      table.close();
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
deleted file mode 100644
index f743388..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-
-@Category({ MediumTests.class, ClientTests.class })
-public class TestShortCircuitConnection {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestShortCircuitConnection.class);
-
-  private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  @Rule
-  public TestName name = new TestName();
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    UTIL.startMiniCluster(1);
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    UTIL.shutdownMiniCluster();
-  }
-
-  @Test
-  @SuppressWarnings("deprecation")
-  public void testShortCircuitConnection() throws IOException, InterruptedException {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    HTableDescriptor htd = UTIL.createTableDescriptor(tableName);
-    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
-    htd.addFamily(hcd);
-    UTIL.createTable(htd, null);
-    HRegionServer regionServer = UTIL.getRSForFirstRegionInTable(tableName);
-    ConnectionImplementation connection = (ConnectionImplementation) regionServer.getConnection();
-    Table tableIf = connection.getTable(tableName);
-    assertTrue(tableIf instanceof HTable);
-    HTable table = (HTable) tableIf;
-    assertTrue(table.getConnection() == connection);
-    AdminService.BlockingInterface admin = connection.getAdmin(regionServer.getServerName());
-    ClientService.BlockingInterface client = connection.getClient(regionServer.getServerName());
-    assertTrue(admin instanceof RSRpcServices);
-    assertTrue(client instanceof RSRpcServices);
-    ServerName anotherSn = ServerName.valueOf(regionServer.getServerName().getHostAndPort(),
-      EnvironmentEdgeManager.currentTime());
-    admin = connection.getAdmin(anotherSn);
-    client = connection.getClient(anotherSn);
-    assertFalse(admin instanceof RSRpcServices);
-    assertFalse(client instanceof RSRpcServices);
-    assertTrue(connection.getAdmin().getConnection() == connection);
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorShortCircuitRPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorSharedConnection.java
similarity index 80%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorShortCircuitRPC.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorSharedConnection.java
index 6cff379..40a3d65 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorShortCircuitRPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorSharedConnection.java
@@ -24,9 +24,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.SharedConnection;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionUtils;
+import org.apache.hadoop.hbase.client.SharedConnection;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.junit.AfterClass;
@@ -38,19 +37,19 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
 /**
- * Ensure Coprocessors get ShortCircuit Connections when they get a Connection from their
+ * Ensure Coprocessors get SharedConnections when they get a Connection from their
  * CoprocessorEnvironment.
  */
-@Category({CoprocessorTests.class, MediumTests.class})
-public class TestCoprocessorShortCircuitRPC {
+@Category({ CoprocessorTests.class, MediumTests.class })
+public class TestCoprocessorSharedConnection {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestCoprocessorShortCircuitRPC.class);
+      HBaseClassTestRule.forClass(TestCoprocessorSharedConnection.class);
 
   @Rule
   public TestName name = new TestName();
-  private static final HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
+  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
 
   /**
    * Start up a mini cluster with my three CPs loaded.
@@ -83,8 +82,6 @@ public class TestCoprocessorShortCircuitRPC {
     public void start(CoprocessorEnvironment env) throws IOException {
       // At start, we get base CoprocessorEnvironment Type, not MasterCoprocessorEnvironment,
       checkShared(((MasterCoprocessorEnvironment) env).getConnection());
-      checkShortCircuit(
-        ((MasterCoprocessorEnvironment) env).createConnection(env.getConfiguration()));
     }
   }
 
@@ -96,8 +93,6 @@ public class TestCoprocessorShortCircuitRPC {
     public void start(CoprocessorEnvironment env) throws IOException {
       // At start, we get base CoprocessorEnvironment Type, not RegionServerCoprocessorEnvironment,
       checkShared(((RegionServerCoprocessorEnvironment) env).getConnection());
-      checkShortCircuit(
-        ((RegionServerCoprocessorEnvironment) env).createConnection(env.getConfiguration()));
     }
   }
 
@@ -109,8 +104,6 @@ public class TestCoprocessorShortCircuitRPC {
     public void start(CoprocessorEnvironment env) throws IOException {
       // At start, we get base CoprocessorEnvironment Type, not RegionCoprocessorEnvironment,
       checkShared(((RegionCoprocessorEnvironment) env).getConnection());
-      checkShortCircuit(
-        ((RegionCoprocessorEnvironment) env).createConnection(env.getConfiguration()));
     }
   }
 
@@ -118,10 +111,6 @@ public class TestCoprocessorShortCircuitRPC {
     assertTrue(connection instanceof SharedConnection);
   }
 
-  private static void checkShortCircuit(Connection connection) {
-    assertTrue(connection instanceof ConnectionUtils.ShortCircuitingClusterConnection);
-  }
-
   @Test
   public void test() throws IOException {
     // Nothing to do in here. The checks are done as part of the cluster spinup when CPs get
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java
index d55e8e0..69b9132 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java
@@ -156,10 +156,11 @@ public class TestPassCustomCellViaRegionObserver {
       table.get(new Get(ROW)).isEmpty());
     assertObserverHasExecuted();
 
-    assertTrue(table.checkAndPut(ROW, FAMILY, QUALIFIER, null, put));
+    assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put));
     assertObserverHasExecuted();
 
-    assertTrue(table.checkAndDelete(ROW, FAMILY, QUALIFIER, VALUE, delete));
+    assertTrue(
+      table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenDelete(delete));
     assertObserverHasExecuted();
 
     assertTrue(table.get(new Get(ROW)).isEmpty());
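
The deprecated checkAndPut/checkAndDelete calls above are replaced with the CheckAndMutateBuilder returned by Table.checkAndMutate. A minimal sketch of that builder pattern, assuming a Table named table plus the ROW/FAMILY/QUALIFIER/VALUE constants and the put/delete mutations from the test:

    // Sketch only: the names below mirror the test and are assumptions.
    // Apply the put only when the cell is still absent.
    boolean putApplied = table.checkAndMutate(ROW, FAMILY)
        .qualifier(QUALIFIER)
        .ifNotExists()
        .thenPut(put);
    // Apply the delete only when the cell currently equals VALUE.
    boolean deleteApplied = table.checkAndMutate(ROW, FAMILY)
        .qualifier(QUALIFIER)
        .ifEquals(VALUE)
        .thenDelete(delete);
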
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java
index 4cfc02c..1859400 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java
@@ -381,29 +381,28 @@ public class TestMultiRowRangeFilter {
   public void testMultiRowRangeFilterWithExclusive() throws IOException {
     tableName = TableName.valueOf(name.getMethodName());
     TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 6000000);
-    Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
-    ht.setReadRpcTimeout(600000);
-    ht.setOperationTimeout(6000000);
-    generateRows(numRows, ht, family, qf, value);
-
-    Scan scan = new Scan();
-    scan.setMaxVersions();
-
-    List<RowRange> ranges = new ArrayList<>();
-    ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
-    ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(40), false));
-    ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false));
-
-    MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
-    scan.setFilter(filter);
-    int resultsSize = getResultsSize(ht, scan);
-    LOG.info("found " + resultsSize + " results");
-    List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht);
-    List<Cell> results2 = getScanResult(Bytes.toBytes(65), Bytes.toBytes(75), ht);
-
-    assertEquals((results1.size() - 1) + results2.size(), resultsSize);
-
-    ht.close();
+    TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
+    try (Table ht = TEST_UTIL.getConnection().getTableBuilder(tableName, null)
+      .setReadRpcTimeout(600000).setOperationTimeout(6000000).build()) {
+      generateRows(numRows, ht, family, qf, value);
+
+      Scan scan = new Scan();
+      scan.setMaxVersions();
+
+      List<RowRange> ranges = new ArrayList<>();
+      ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
+      ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(40), false));
+      ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false));
+
+      MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
+      scan.setFilter(filter);
+      int resultsSize = getResultsSize(ht, scan);
+      LOG.info("found " + resultsSize + " results");
+      List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht);
+      List<Cell> results2 = getScanResult(Bytes.toBytes(65), Bytes.toBytes(75), ht);
+
+      assertEquals((results1.size() - 1) + results2.size(), resultsSize);
+    }
   }
 
   @Test
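
Because the timeout setters on Table are gone, per-table timeouts are now configured up front through the TableBuilder. A minimal sketch of that pattern, assuming an open Connection conn and a TableName tableName:

    // Sketch only: conn and tableName are assumptions; the values mirror the test above.
    try (Table ht = conn.getTableBuilder(tableName, null) // null = use the connection's default pool
        .setReadRpcTimeout(600000)      // per read-RPC timeout, in milliseconds
        .setOperationTimeout(6000000)   // total retry budget for one operation, in milliseconds
        .build()) {
      // every call made through ht uses these timeouts
      ht.get(new Get(Bytes.toBytes(10)));
    }
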
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
index a512833..5ce7886 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
@@ -105,6 +105,18 @@ public class TestMasterShutdown {
     htu.shutdownMiniCluster();
   }
 
+  private Connection createConnection(HBaseTestingUtility util) throws InterruptedException {
+    // The cluster may not have been initialized yet, which means we cannot get the cluster id and
+    // an exception will be thrown, so retry until the connection can be created.
+    for (;;) {
+      try {
+        return ConnectionFactory.createConnection(util.getConfiguration());
+      } catch (Exception e) {
+        Thread.sleep(10);
+      }
+    }
+  }
+
   @Test
   public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception {
     final int NUM_MASTERS = 1;
@@ -131,13 +143,8 @@ public class TestMasterShutdown {
       @Override
       public void run() {
         LOG.info("Before call to shutdown master");
-        try {
-          try (
-            Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-            try (Admin admin = connection.getAdmin()) {
-              admin.shutdown();
-            }
-          }
+        try (Connection connection = createConnection(util); Admin admin = connection.getAdmin()) {
+          admin.shutdown();
         } catch (Exception e) {
           LOG.info("Error while calling Admin.shutdown, which is expected: " + e.getMessage());
         }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
index 3babd2e..a930076 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
@@ -24,7 +24,6 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
@@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.client.CompactionState;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -139,7 +139,7 @@ public class TestWarmupRegion {
         RegionInfo info = region.getRegionInfo();
 
         try {
-          HTableDescriptor htd = table.getTableDescriptor();
+          TableDescriptor htd = table.getDescriptor();
           for (int i = 0; i < 10; i++) {
             warmupHRegion(info, htd, rs.getWAL(info), rs.getConfiguration(), rs, null);
           }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
index 6a520d1..07b834b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
@@ -29,8 +29,6 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -82,12 +80,6 @@ public class RegionAsTable implements Table {
   }
 
   @Override
-  @Deprecated
-  public HTableDescriptor getTableDescriptor() throws IOException {
-    return new HTableDescriptor(this.region.getTableDescriptor());
-  }
-
-  @Override
   public TableDescriptor getDescriptor() throws IOException {
     return this.region.getTableDescriptor();
   }
@@ -212,21 +204,6 @@ public class RegionAsTable implements Table {
   }
 
   @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
-  throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-                             CompareOperator compareOp, byte[] value, Put put)
-  throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public void delete(Delete delete) throws IOException {
     this.region.delete(delete);
   }
@@ -237,21 +214,6 @@ public class RegionAsTable implements Table {
   }
 
   @Override
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
-      Delete delete)
-  throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-                                CompareOperator compareOp, byte[] value, Delete delete)
-  throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
     throw new UnsupportedOperationException();
   }
@@ -326,77 +288,26 @@ public class RegionAsTable implements Table {
   }
 
   @Override
-  @Deprecated
-  public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, RowMutations mutation) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setOperationTimeout(int operationTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public int getOperationTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setRpcTimeout(int rpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public long getReadRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  @Deprecated
-  public void setWriteRpcTimeout(int writeRpcTimeout) {throw new UnsupportedOperationException(); }
-
-  @Override
   public long getOperationTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  @Deprecated
-  public void setReadRpcTimeout(int readRpcTimeout) {throw new UnsupportedOperationException(); }
-
-  @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  @Deprecated
-  public int getRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public long getRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  @Deprecated
-  public int getWriteRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public int getReadRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public RegionLocator getRegionLocator() throws IOException {
     throw new UnsupportedOperationException();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 57ced95..8fa7f44 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -222,7 +222,7 @@ public class TestEndToEndSplitTransaction {
     RegionSplitter(Table table) throws IOException {
       this.table = table;
       this.tableName = table.getName();
-      this.family = table.getTableDescriptor().getFamiliesKeys().iterator().next();
+      this.family = table.getDescriptor().getColumnFamilyNames().iterator().next();
       admin = TEST_UTIL.getAdmin();
       rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
       connection = TEST_UTIL.getConnection();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
index be29f1a..dc538b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
@@ -39,10 +39,11 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -81,9 +82,9 @@ public class TestHRegionFileSystem {
     TEST_UTIL = new HBaseTestingUtility();
     Configuration conf = TEST_UTIL.getConfiguration();
     TEST_UTIL.startMiniCluster();
-    HTable table = (HTable) TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
+    Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
     assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
-    HRegionFileSystem regionFs = getHRegionFS(table, conf);
+    HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
     // the original block storage policy would be HOT
     String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
     String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
@@ -96,8 +97,8 @@ public class TestHRegionFileSystem {
     TEST_UTIL.shutdownMiniCluster();
     TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
     TEST_UTIL.startMiniCluster();
-    table = (HTable) TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
-    regionFs = getHRegionFS(table, conf);
+    table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
+    regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
 
     try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
       spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
@@ -180,14 +181,16 @@ public class TestHRegionFileSystem {
     }
   }
 
-  private HRegionFileSystem getHRegionFS(HTable table, Configuration conf) throws IOException {
+  private HRegionFileSystem getHRegionFS(Connection conn, Table table, Configuration conf)
+      throws IOException {
     FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
     Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), table.getName());
     List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
     assertEquals(1, regionDirs.size());
     List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDirs.get(0));
     assertEquals(2, familyDirs.size());
-    RegionInfo hri = table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo();
+    RegionInfo hri =
+      conn.getRegionLocator(table.getName()).getAllRegionLocations().get(0).getRegionInfo();
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, new HFileSystem(fs), tableDir, hri);
     return regionFs;
   }
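
Region lookups here now go through the Connection's RegionLocator instead of casting the Table to HTable. A small sketch of that lookup, assuming a Connection conn and a TableName tableName:

    // Sketch only: conn and tableName are assumptions.
    try (RegionLocator locator = conn.getRegionLocator(tableName)) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        RegionInfo info = location.getRegion(); // non-deprecated replacement for getRegionInfo()
        // e.g. inspect info.getEncodedName(), info.getStartKey(), info.getEndKey()
      }
    }
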
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java
index d0bc373..3c3dadf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java
@@ -311,9 +311,10 @@ public class TestNewVersionBehaviorFromClientSide {
 
   @Test
   public void testgetColumnHint() throws IOException {
-    try (Table t = createTable()) {
-      t.setOperationTimeout(10000);
-      t.setRpcTimeout(10000);
+    createTable();
+    try (Table t =
+      TEST_UTIL.getConnection().getTableBuilder(TableName.valueOf(name.getMethodName()), null)
+        .setOperationTimeout(10000).setRpcTimeout(10000).build()) {
       t.put(new Put(ROW).addColumn(FAMILY, col1, 100, value));
       t.put(new Put(ROW).addColumn(FAMILY, col1, 101, value));
       t.put(new Put(ROW).addColumn(FAMILY, col1, 102, value));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index 0e7c019..68ba2e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -353,13 +353,6 @@ public class TestPerColumnFamilyFlush {
       TEST_UTIL.getAdmin().createNamespace(
         NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build());
       Table table = TEST_UTIL.createTable(TABLENAME, FAMILIES);
-      HTableDescriptor htd = table.getTableDescriptor();
-
-      for (byte[] family : FAMILIES) {
-        if (!htd.hasFamily(family)) {
-          htd.addFamily(new HColumnDescriptor(family));
-        }
-      }
 
       // Add 100 edits for CF1, 20 for CF2, 20 for CF3.
       // These will all be interleaved in the log.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 3778c20..b57ff41 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -28,11 +29,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompatibilityFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -45,10 +43,12 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
@@ -154,19 +154,19 @@ public class TestRegionServerMetrics {
     admin.deleteTable(tableName);
   }
 
-  public void assertCounter(String metric, long expectedValue) {
+  private void assertCounter(String metric, long expectedValue) {
     metricsHelper.assertCounter(metric, expectedValue, serverSource);
   }
 
-  public void assertGauge(String metric, long expectedValue) {
+  private void assertGauge(String metric, long expectedValue) {
     metricsHelper.assertGauge(metric, expectedValue, serverSource);
   }
 
   // Aggregates metrics from regions and assert given list of metrics and expected values.
-  public void assertRegionMetrics(String metric, long expectedValue) throws Exception {
+  private void assertRegionMetrics(String metric, long expectedValue) throws Exception {
     try (RegionLocator locator = connection.getRegionLocator(tableName)) {
       for ( HRegionLocation location: locator.getAllRegionLocations()) {
-        HRegionInfo hri = location.getRegionInfo();
+        RegionInfo hri = location.getRegion();
         MetricsRegionAggregateSource agg =
             rs.getRegion(hri.getRegionName()).getMetrics().getSource().getAggregateSource();
         String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR +
@@ -178,7 +178,7 @@ public class TestRegionServerMetrics {
     }
   }
 
-  public void doNPuts(int n, boolean batch) throws Exception {
+  private void doNPuts(int n, boolean batch) throws Exception {
     if (batch) {
       List<Put> puts = new ArrayList<>();
       for (int i = 0; i < n; i++) {
@@ -194,7 +194,7 @@ public class TestRegionServerMetrics {
     }
   }
 
-  public void doNGets(int n, boolean batch) throws Exception {
+  private void doNGets(int n, boolean batch) throws Exception {
     if (batch) {
       List<Get> gets = new ArrayList<>();
       for (int i = 0; i < n; i++) {
@@ -208,7 +208,7 @@ public class TestRegionServerMetrics {
     }
   }
 
-  public void doScan(int n, boolean caching) throws IOException {
+  private void doScan(int n, boolean caching) throws IOException {
     Scan scan = new Scan();
     if (caching) {
       scan.setCaching(n);
@@ -405,21 +405,20 @@ public class TestRegionServerMetrics {
 
   @Test
   public void testScanSize() throws Exception {
-    doNPuts(100, true);  // batch put
+    doNPuts(100, true); // batch put
     Scan s = new Scan();
-    s.setBatch(1);
-    s.setCaching(1);
-    ResultScanner resultScanners = table.getScanner(s);
-
-    for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
-      Result result = resultScanners.next();
-      assertNotNull(result);
-      assertEquals(1, result.size());
-    }
-    numScanNext += NUM_SCAN_NEXT;
-    assertRegionMetrics("scanCount", NUM_SCAN_NEXT);
-    if (TABLES_ON_MASTER) {
-      assertCounter("ScanSize_num_ops", numScanNext);
+    s.setBatch(1).setCaching(1).setLimit(NUM_SCAN_NEXT).setReadType(ReadType.STREAM);
+    try (ResultScanner resultScanners = table.getScanner(s)) {
+      for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
+        Result result = resultScanners.next();
+        assertNotNull(result);
+        assertEquals(1, result.size());
+      }
+      numScanNext += NUM_SCAN_NEXT;
+      assertRegionMetrics("scanCount", NUM_SCAN_NEXT);
+      if (TABLES_ON_MASTER) {
+        assertCounter("ScanSize_num_ops", numScanNext);
+      }
     }
   }
 
@@ -427,14 +426,13 @@ public class TestRegionServerMetrics {
   public void testScanTime() throws Exception {
     doNPuts(100, true);
     Scan s = new Scan();
-    s.setBatch(1);
-    s.setCaching(1);
-    ResultScanner resultScanners = table.getScanner(s);
-
-    for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
-      Result result = resultScanners.next();
-      assertNotNull(result);
-      assertEquals(1, result.size());
+    s.setBatch(1).setCaching(1).setLimit(NUM_SCAN_NEXT);
+    try (ResultScanner resultScanners = table.getScanner(s)) {
+      for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
+        Result result = resultScanners.next();
+        assertNotNull(result);
+        assertEquals(1, result.size());
+      }
     }
     numScanNext += NUM_SCAN_NEXT;
     assertRegionMetrics("scanCount", NUM_SCAN_NEXT);
@@ -447,16 +445,16 @@ public class TestRegionServerMetrics {
   public void testScanSizeForSmallScan() throws Exception {
     doNPuts(100, true);
     Scan s = new Scan();
-    s.setSmall(true);
-    s.setCaching(1);
-    ResultScanner resultScanners = table.getScanner(s);
-
-    for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
-      Result result = resultScanners.next();
-      assertNotNull(result);
-      if (TABLES_ON_MASTER) {
-        assertEquals(1, result.size());
+    s.setCaching(1).setLimit(NUM_SCAN_NEXT).setReadType(ReadType.PREAD);
+    try (ResultScanner resultScanners = table.getScanner(s)) {
+      for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
+        Result result = resultScanners.next();
+        assertNotNull(result);
+        if (TABLES_ON_MASTER) {
+          assertEquals(1, result.size());
+        }
       }
+      assertNull(resultScanners.next());
     }
     numScanNext += NUM_SCAN_NEXT;
     assertRegionMetrics("scanCount", NUM_SCAN_NEXT);
@@ -469,11 +467,10 @@ public class TestRegionServerMetrics {
   public void testMobMetrics() throws IOException, InterruptedException {
     TableName tableName = TableName.valueOf("testMobMetricsLocal");
     int numHfiles = 5;
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    HColumnDescriptor hcd = new HColumnDescriptor(cf);
-    hcd.setMobEnabled(true);
-    hcd.setMobThreshold(0);
-    htd.addFamily(hcd);
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(
+        ColumnFamilyDescriptorBuilder.newBuilder(cf).setMobEnabled(true).setMobThreshold(0).build())
+      .build();
     byte[] val = Bytes.toBytes("mobdata");
     try {
       Table table = TEST_UTIL.createTable(htd, new byte[0][0], conf);
@@ -486,7 +483,7 @@ public class TestRegionServerMetrics {
       }
       metricsRegionServer.getRegionServerWrapper().forceRecompute();
       assertCounter("mobFlushCount", numHfiles);
-      Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(numHfiles));
+      Scan scan = new Scan().withStartRow(Bytes.toBytes(0)).withStopRow(Bytes.toBytes(numHfiles));
       ResultScanner scanner = table.getScanner(scan);
       scanner.next(100);
       numScanNext++;  // this is an ugly construct
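
The scan changes above drop the deprecated small-scan flag and start/stop-row constructor in favor of explicit read types and row bounds. A short sketch of the equivalent non-deprecated Scan setup, with placeholder bounds:

    // Sketch only: the row bounds and limit are placeholder assumptions.
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes(0))   // replaces new Scan(startRow, stopRow)
        .withStopRow(Bytes.toBytes(5))
        .setReadType(Scan.ReadType.PREAD) // replaces the removed setSmall(true)
        .setCaching(1)
        .setLimit(10);                    // bound the number of rows returned
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        // process result
      }
    }
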
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
index 5aec32a..ea9f7e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
@@ -58,6 +58,7 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -74,7 +75,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRespon
  * the client when the server has exceeded the time limit during the processing of the scan. When
  * the time limit is reached, the server will return to the Client whatever Results it has
  * accumulated (potentially empty).
+ * <p/>
+ * TODO: with the sync client implemented on top of the async client, results are fetched in the
+ * background, which breaks this test. We need to find another way to implement it.
  */
+@Ignore
 @Category(MediumTests.class)
 public class TestScannerHeartbeatMessages {
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java
index 130b651..3885312 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java
@@ -107,12 +107,10 @@ public class TestSettingTimeoutOnBlockingPoint {
       }
     });
     Thread getThread = new Thread(() -> {
-      try {
-        try( Table table = TEST_UTIL.getConnection().getTable(tableName)) {
-          table.setRpcTimeout(1000);
-          Delete delete = new Delete(ROW1);
-          table.delete(delete);
-        }
+      try (Table table =
+        TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) {
+        Delete delete = new Delete(ROW1);
+        table.delete(delete);
       } catch (IOException e) {
         Assert.fail(e.getMessage());
       }
@@ -122,12 +120,12 @@ public class TestSettingTimeoutOnBlockingPoint {
     Threads.sleep(1000);
     getThread.start();
     Threads.sleep(2000);
-    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
+    try (Table table =
+      TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) {
       // We have only two handlers. The first thread will get a write lock for row1 and occupy
       // the first handler. The second thread needs a read lock for row1; it should quit after 1000
       // ms and give back the handler because it cannot get the lock in time.
       // So we can get the value using the second handler.
-      table.setRpcTimeout(1000);
       table.get(new Get(ROW2)); // Will throw exception if the timeout checking is failed
     } finally {
       incrementThread.interrupt();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index e993a78..aa07126 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -216,7 +216,6 @@ public class TestReplicationBase {
     // than default
     conf1 = utility1.getConfiguration();
     zkw1 = new ZKWatcher(conf1, "cluster1", null, true);
-    admin = new ReplicationAdmin(conf1);
     LOG.info("Setup first Zk");
 
     utility2.setZkCluster(miniZK);
@@ -229,6 +228,7 @@ public class TestReplicationBase {
     // as a component in deciding maximum number of parallel batches to send to the peer cluster.
     utility2.startMiniCluster(4);
 
+    admin = new ReplicationAdmin(conf1);
     hbaseAdmin = ConnectionFactory.createConnection(conf1).getAdmin();
 
     TableDescriptor table = TableDescriptorBuilder.newBuilder(tableName)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index 8c5299e..5a185a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -121,7 +121,6 @@ public class TestReplicationWithTags {
     // Have to reget conf1 in case zk cluster location different
     // than default
     conf1 = utility1.getConfiguration();
-    replicationAdmin = new ReplicationAdmin(conf1);
     LOG.info("Setup first Zk");
 
     // Base conf2 on conf1 so it gets the right zk cluster.
@@ -141,6 +140,7 @@ public class TestReplicationWithTags {
     utility1.startMiniCluster(2);
     utility2.startMiniCluster(2);
 
+    replicationAdmin = new ReplicationAdmin(conf1);
     ReplicationPeerConfig rpc = new ReplicationPeerConfig();
     rpc.setClusterKey(utility2.getClusterKey());
     replicationAdmin.addPeer("2", rpc, null);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
index 6078f55..4b9b967 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
@@ -98,13 +98,13 @@ public class TestGlobalReplicationThrottler {
     utility2.setZkCluster(miniZK);
     new ZKWatcher(conf2, "cluster2", null, true);
 
-    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
     ReplicationPeerConfig rpc = new ReplicationPeerConfig();
     rpc.setClusterKey(utility2.getClusterKey());
 
     utility1.startMiniCluster();
     utility2.startMiniCluster();
 
+    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
     admin1.addPeer("peer1", rpc, null);
     admin1.addPeer("peer2", rpc, null);
     admin1.addPeer("peer3", rpc, null);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java
index d7a0277..e6a7386 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java
@@ -110,7 +110,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
     UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
     Connection connection = ConnectionFactory.createConnection(conf);
     Table t = connection.getTable(TEST_TABLE);
-    HTableDescriptor htd = new HTableDescriptor(t.getTableDescriptor());
+    HTableDescriptor htd = new HTableDescriptor(t.getDescriptor());
     htd.addCoprocessor("net.clayb.hbase.coprocessor.NotWhitelisted",
       new Path(coprocessorPath),
       Coprocessor.PRIORITY_USER, null);
@@ -122,7 +122,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
       // swallow exception from coprocessor
     }
     LOG.info("Done Modifying Table");
-    assertEquals(0, t.getTableDescriptor().getCoprocessors().size());
+    assertEquals(0, t.getDescriptor().getCoprocessorDescriptors().size());
   }
 
   /**
@@ -156,7 +156,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
     // coprocessor file
     admin.disableTable(TEST_TABLE);
     Table t = connection.getTable(TEST_TABLE);
-    HTableDescriptor htd = new HTableDescriptor(t.getTableDescriptor());
+    HTableDescriptor htd = new HTableDescriptor(t.getDescriptor());
     htd.addCoprocessor("net.clayb.hbase.coprocessor.Whitelisted",
       new Path(coprocessorPath),
       Coprocessor.PRIORITY_USER, null);
@@ -328,6 +328,6 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
     // ensure table was created and coprocessor is added to table
     LOG.info("Done Creating Table");
     Table t = connection.getTable(TEST_TABLE);
-    assertEquals(1, t.getTableDescriptor().getCoprocessors().size());
+    assertEquals(1, t.getDescriptor().getCoprocessorDescriptors().size());
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
index ba0dee3..5efc6cb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
@@ -115,7 +115,6 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
     TEST_UTIL.startMiniZKCluster();
     MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
     zkw1 = new ZKWatcher(conf, "cluster1", null, true);
-    admin = TEST_UTIL.getAdmin();
 
     // Base conf2 on conf1 so it gets the right zk cluster.
     conf1 = HBaseConfiguration.create(conf);
@@ -136,6 +135,7 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
     TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
     TEST_UTIL1.startMiniCluster(1);
 
+    admin = TEST_UTIL.getAdmin();
     ReplicationPeerConfig rpc = new ReplicationPeerConfig();
     rpc.setClusterKey(TEST_UTIL1.getClusterKey());
     admin.addReplicationPeer("2", rpc);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
index 0bbb115..52809b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
@@ -166,7 +166,6 @@ public class TestVisibilityLabelsReplication {
     TEST_UTIL.startMiniZKCluster();
     MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
     zkw1 = new ZKWatcher(conf, "cluster1", null, true);
-    admin = TEST_UTIL.getAdmin();
 
     // Base conf2 on conf1 so it gets the right zk cluster.
     conf1 = HBaseConfiguration.create(conf);
@@ -188,6 +187,7 @@ public class TestVisibilityLabelsReplication {
     TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
     TEST_UTIL1.startMiniCluster(1);
 
+    admin = TEST_UTIL.getAdmin();
     ReplicationPeerConfig rpc = new ReplicationPeerConfig();
     rpc.setClusterKey(TEST_UTIL1.getClusterKey());
     admin.addReplicationPeer("2", rpc);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
index 17674af..fb7da12 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
@@ -127,7 +127,7 @@ public class TestRegionSnapshotTask {
     Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
     final SnapshotManifest manifest =
         SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
-    manifest.addTableDescriptor(table.getTableDescriptor());
+    manifest.addTableDescriptor(table.getDescriptor());
 
     if (!fs.exists(workingDir)) {
       fs.mkdirs(workingDir);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
index bc745f6c..f68a1bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
@@ -252,8 +252,7 @@ public class TestCanaryTool {
   private void testZookeeperCanaryWithArgs(String[] args) throws Exception {
     Integer port =
       Iterables.getOnlyElement(testingUtility.getZkCluster().getClientPortList(), null);
-    testingUtility.getConfiguration().set(HConstants.ZOOKEEPER_QUORUM,
-      "localhost:" + port + "/hbase");
+    testingUtility.getConfiguration().set(HConstants.ZOOKEEPER_QUORUM, "localhost:" + port);
     ExecutorService executor = new ScheduledThreadPoolExecutor(2);
     Canary.ZookeeperStdOutSink sink = spy(new Canary.ZookeeperStdOutSink());
     Canary canary = new Canary(executor, sink);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index f245384..681499c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -144,9 +144,8 @@ public abstract class MultiThreadedAction {
 
   public static final int REPORTING_INTERVAL_MS = 5000;
 
-  public MultiThreadedAction(LoadTestDataGenerator dataGen, Configuration conf,
-                             TableName tableName,
-                             String actionLetter) throws IOException {
+  public MultiThreadedAction(LoadTestDataGenerator dataGen, Configuration conf, TableName tableName,
+      String actionLetter) throws IOException {
     this.conf = conf;
     this.dataGenerator = dataGen;
     this.tableName = tableName;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
index 1329f44..1d79701 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
@@ -137,7 +137,7 @@ public class OfflineMetaRebuildTestCore {
     return this.connection.getTable(tablename);
   }
 
-  private void dumpMeta(HTableDescriptor htd) throws IOException {
+  private void dumpMeta(TableDescriptor htd) throws IOException {
     List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(htd.getTableName());
     for (byte[] row : metaRows) {
       LOG.info(Bytes.toString(row));
@@ -162,7 +162,7 @@ public class OfflineMetaRebuildTestCore {
       byte[] startKey, byte[] endKey) throws IOException {
 
     LOG.info("Before delete:");
-    HTableDescriptor htd = tbl.getTableDescriptor();
+    TableDescriptor htd = tbl.getDescriptor();
     dumpMeta(htd);
 
     List<HRegionLocation> regions;
@@ -171,7 +171,7 @@ public class OfflineMetaRebuildTestCore {
     }
 
     for (HRegionLocation e : regions) {
-      RegionInfo hri = e.getRegionInfo();
+      RegionInfo hri = e.getRegion();
       ServerName hsa = e.getServerName();
       if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
           && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
@@ -203,7 +203,6 @@ public class OfflineMetaRebuildTestCore {
   protected RegionInfo createRegion(Configuration conf, final Table htbl,
       byte[] startKey, byte[] endKey) throws IOException {
     Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
-    HTableDescriptor htd = htbl.getTableDescriptor();
     RegionInfo hri = RegionInfoBuilder.newBuilder(htbl.getName())
         .setStartKey(startKey)
         .setEndKey(endKey)
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
index d4ac016..19b4afb 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
@@ -30,7 +30,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilder;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -103,16 +103,12 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements Hb
 
   /**
    * Returns a list of all the column families for a given Table.
-   *
-   * @param table table
-   * @throws IOException
    */
   byte[][] getAllColumns(Table table) throws IOException {
-    HColumnDescriptor[] cds = table.getTableDescriptor().getColumnFamilies();
+    ColumnFamilyDescriptor[] cds = table.getDescriptor().getColumnFamilies();
     byte[][] columns = new byte[cds.length][];
     for (int i = 0; i < cds.length; i++) {
-      columns[i] = Bytes.add(cds[i].getName(),
-          KeyValue.COLUMN_FAMILY_DELIM_ARRAY);
+      columns[i] = Bytes.add(cds[i].getName(), KeyValue.COLUMN_FAMILY_DELIM_ARRAY);
     }
     return columns;
   }
@@ -1090,7 +1086,7 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements Hb
       TreeMap<ByteBuffer, ColumnDescriptor> columns = new TreeMap<>();
 
       table = getTable(tableName);
-      HTableDescriptor desc = table.getTableDescriptor();
+      HTableDescriptor desc = new HTableDescriptor(table.getDescriptor());
 
       for (HColumnDescriptor e : desc.getFamilies()) {
         ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e);
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
index 8b1be58..565a9c7 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
@@ -227,7 +227,7 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements TH
   public List<Boolean> existsAll(ByteBuffer table, List<TGet> gets) throws TIOError, TException {
     Table htable = getTable(table);
     try {
-      boolean[] exists = htable.existsAll(getsFromThrift(gets));
+      boolean[] exists = htable.exists(getsFromThrift(gets));
       List<Boolean> result = new ArrayList<>(exists.length);
       for (boolean exist : exists) {
         result.add(exist);
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
index abaaba0..4db8fd6 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
@@ -367,4 +368,9 @@ public class ThriftConnection implements Connection {
   public void clearRegionLocationCache() {
     throw new NotImplementedException("clearRegionLocationCache not supported in ThriftTable");
   }
+
+  @Override
+  public AsyncConnection toAsyncConnection() {
+    throw new NotImplementedException("toAsyncConnection not supported in ThriftTable");
+  }
 }
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java
index 6db9474..eb05630 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java
@@ -409,9 +409,7 @@ public class ThriftTable implements Table {
     }
   }
 
-
-  @Override
-  public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+  private boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
       byte[] value, RowMutations mutation) throws IOException {
     try {
       ByteBuffer valueBuffer = value == null? null : ByteBuffer.wrap(value);
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 2b3a80a..c279172 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -614,7 +614,7 @@ public class TestThriftConnection {
       assertTrue(Bytes.equals(VALUE_1, value1));
       assertNull(value2);
       assertTrue(table.exists(get));
-      assertEquals(1, table.existsAll(Collections.singletonList(get)).length);
+      assertEquals(1, table.exists(Collections.singletonList(get)).length);
       Delete delete = new Delete(ROW_1);
 
       table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1)


[hbase] 27/27: HBASE-22351 Addendum limit the concurrency when creating table

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 5db9b8115e3658182eece94715d8d35927d7cb98
Author: zhangduo <zh...@apache.org>
AuthorDate: Thu May 23 22:22:25 2019 +0800

    HBASE-22351 Addendum limit the concurrency when creating table
---
 .../hbase/master/procedure/TestProcedurePriority.java      | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
index 36f31e0..3a01d16 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
@@ -22,13 +22,14 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
+import org.apache.hadoop.hbase.client.AsyncAdmin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -108,16 +109,19 @@ public class TestProcedurePriority {
     UTIL.getConfiguration().setLong(ProcedureExecutor.WORKER_KEEP_ALIVE_TIME_CONF_KEY, 5000);
     UTIL.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 4);
     UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MyCP.class.getName());
-    UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 100);
     UTIL.startMiniCluster(3);
     CORE_POOL_SIZE =
       UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getCorePoolSize();
     TABLE_COUNT = 50 * CORE_POOL_SIZE;
     List<Future<?>> futures = new ArrayList<>();
+    AsyncAdmin admin = UTIL.getAsyncConnection().getAdmin();
+    Semaphore concurrency = new Semaphore(10);
     for (int i = 0; i < TABLE_COUNT; i++) {
-      futures.add(UTIL.getAdmin().createTableAsync(
-        TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE_NAME_PREFIX + i))
-          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build()));
+      concurrency.acquire();
+      futures.add(admin
+        .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE_NAME_PREFIX + i))
+          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build())
+        .whenComplete((r, e) -> concurrency.release()));
     }
     for (Future<?> future : futures) {
       future.get(3, TimeUnit.MINUTES);
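
The addendum throttles the asynchronous createTable calls with a Semaphore so that at most ten table-creation procedures are in flight at once. The shape of that pattern, assuming an AsyncAdmin admin and a list of TableDescriptors to create:

    // Sketch only: admin and descriptors are assumptions; the limit of 10 mirrors the patch.
    Semaphore concurrency = new Semaphore(10);
    List<Future<?>> futures = new ArrayList<>();
    for (TableDescriptor td : descriptors) {
      concurrency.acquire();                               // block until a permit is free
      futures.add(admin.createTable(td)
          .whenComplete((r, e) -> concurrency.release())); // free the permit when the call completes
    }
    for (Future<?> future : futures) {
      future.get(3, TimeUnit.MINUTES);                     // surface any creation failure
    }
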


[hbase] 15/27: HBASE-21725 Implement BufferedMutator Based on AsyncBufferedMutator

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit cc3cfbc5556d26b42e5eaf7760bb3979caa047cf
Author: zhangduo <zh...@apache.org>
AuthorDate: Sun Apr 14 20:32:38 2019 +0800

    HBASE-21725 Implement BufferedMutator Based on AsyncBufferedMutator
---
 .../hadoop/hbase/client/BufferedMutator.java       |  10 ++
 .../BufferedMutatorOverAsyncBufferedMutator.java   | 175 +++++++++++++++++++++
 .../hadoop/hbase/client/BufferedMutatorParams.java |  23 ++-
 .../client/ConnectionOverAsyncConnection.java      |  19 ++-
 .../hadoop/hbase/client/TestBufferedMutator.java   |  82 ----------
 .../hadoop/hbase/client/TestBufferedMutator.java   |  90 +++++++++++
 6 files changed, 309 insertions(+), 90 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java
index 7805f77..8ad6a79 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java
@@ -62,7 +62,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface BufferedMutator extends Closeable {
   /**
    * Key to use setting non-default BufferedMutator implementation in Configuration.
+   * <p/>
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it
+   *             any more.
    */
+  @Deprecated
   String CLASSNAME_KEY = "hbase.client.bufferedmutator.classname";
 
   /**
@@ -179,12 +183,18 @@ public interface BufferedMutator extends Closeable {
 
   /**
    * Set rpc timeout for this mutator instance
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. Please set this through the
+   *             {@link BufferedMutatorParams}.
    */
+  @Deprecated
   void setRpcTimeout(int timeout);
 
   /**
    * Set operation timeout for this mutator instance
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. Please set this through the
+   *             {@link BufferedMutatorParams}.
    */
+  @Deprecated
   void setOperationTimeout(int timeout);
 
   /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java
new file mode 100644
index 0000000..a7d4595
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * {@link BufferedMutator} implementation based on {@link AsyncBufferedMutator}.
+ */
+@InterfaceAudience.Private
+class BufferedMutatorOverAsyncBufferedMutator implements BufferedMutator {
+
+  private final AsyncBufferedMutator mutator;
+
+  private final ExceptionListener listener;
+
+  private List<CompletableFuture<Void>> futures = new ArrayList<>();
+
+  private final ConcurrentLinkedQueue<Pair<Mutation, Throwable>> errors =
+    new ConcurrentLinkedQueue<>();
+
+  private final static int BUFFERED_FUTURES_THRESHOLD = 1024;
+
+  BufferedMutatorOverAsyncBufferedMutator(AsyncBufferedMutator mutator,
+      ExceptionListener listener) {
+    this.mutator = mutator;
+    this.listener = listener;
+  }
+
+  @Override
+  public TableName getName() {
+    return mutator.getName();
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return mutator.getConfiguration();
+  }
+
+  @Override
+  public void mutate(Mutation mutation) throws IOException {
+    mutate(Collections.singletonList(mutation));
+  }
+
+  private static final Pattern ADDR_MSG_MATCHER = Pattern.compile("Call to (\\S+) failed");
+
+  // not always work, so may return an empty string
+  private String getHostnameAndPort(Throwable error) {
+    Matcher matcher = ADDR_MSG_MATCHER.matcher(error.getMessage());
+    if (matcher.matches()) {
+      return matcher.group(1);
+    } else {
+      return "";
+    }
+  }
+
+  private RetriesExhaustedWithDetailsException makeError() {
+    List<Row> rows = new ArrayList<>();
+    List<Throwable> throwables = new ArrayList<>();
+    List<String> hostnameAndPorts = new ArrayList<>();
+    for (;;) {
+      Pair<Mutation, Throwable> pair = errors.poll();
+      if (pair == null) {
+        break;
+      }
+      rows.add(pair.getFirst());
+      throwables.add(pair.getSecond());
+      hostnameAndPorts.add(getHostnameAndPort(pair.getSecond()));
+    }
+    return new RetriesExhaustedWithDetailsException(throwables, rows, hostnameAndPorts);
+  }
+
+  @Override
+  public void mutate(List<? extends Mutation> mutations) throws IOException {
+    List<CompletableFuture<Void>> toBuffered = new ArrayList<>();
+    List<CompletableFuture<Void>> fs = mutator.mutate(mutations);
+    for (int i = 0, n = fs.size(); i < n; i++) {
+      CompletableFuture<Void> toComplete = new CompletableFuture<>();
+      final int index = i;
+      addListener(fs.get(index), (r, e) -> {
+        if (e != null) {
+          errors.add(Pair.newPair(mutations.get(index), e));
+          toComplete.completeExceptionally(e);
+        } else {
+          toComplete.complete(r);
+        }
+      });
+      toBuffered.add(toComplete);
+    }
+    synchronized (this) {
+      futures.addAll(toBuffered);
+      if (futures.size() > BUFFERED_FUTURES_THRESHOLD) {
+        tryCompleteFuture();
+      }
+      if (!errors.isEmpty()) {
+        RetriesExhaustedWithDetailsException error = makeError();
+        listener.onException(error, this);
+      }
+    }
+  }
+
+  private void tryCompleteFuture() {
+    futures = futures.stream().filter(f -> !f.isDone()).collect(Collectors.toList());
+  }
+
+  @Override
+  public void close() throws IOException {
+    flush();
+    mutator.close();
+  }
+
+  @Override
+  public void flush() throws IOException {
+    mutator.flush();
+    synchronized (this) {
+      List<CompletableFuture<Void>> toComplete = this.futures;
+      this.futures = new ArrayList<>();
+      try {
+        CompletableFuture.allOf(toComplete.toArray(new CompletableFuture<?>[toComplete.size()]))
+          .join();
+      } catch (CompletionException e) {
+        // just ignore, we will record the actual error in the errors field
+      }
+      if (!errors.isEmpty()) {
+        RetriesExhaustedWithDetailsException error = makeError();
+        listener.onException(error, this);
+      }
+    }
+  }
+
+  @Override
+  public long getWriteBufferSize() {
+    return mutator.getWriteBufferSize();
+  }
+
+  @Override
+  public void setRpcTimeout(int timeout) {
+    // no effect
+  }
+
+  @Override
+  public void setOperationTimeout(int timeout) {
+    // no effect
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
index 3f6c565..49fb77b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
@@ -101,13 +101,21 @@ public class BufferedMutatorParams implements Cloneable {
     return this;
   }
 
+  /**
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. We use a common timer in the whole client
+   *             implementation so you can not set it any more.
+   */
+  @Deprecated
   public long getWriteBufferPeriodicFlushTimerTickMs() {
     return writeBufferPeriodicFlushTimerTickMs;
   }
 
   /**
    * Set the TimerTick how often the buffer timeout if checked.
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. We use a common timer in the whole client
+   *             implementation so you can not set it any more.
    */
+  @Deprecated
   public BufferedMutatorParams setWriteBufferPeriodicFlushTimerTickMs(long timerTickMs) {
     this.writeBufferPeriodicFlushTimerTickMs = timerTickMs;
     return this;
@@ -141,9 +149,12 @@ public class BufferedMutatorParams implements Cloneable {
   }
 
   /**
-   * @return Name of the class we will use when we construct a
-   * {@link BufferedMutator} instance or null if default implementation.
+   * @return Name of the class we will use when we construct a {@link BufferedMutator} instance or
+   *         null if default implementation.
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. You can not set it any more as the
+   *             implementation has to use too many internal stuffs in HBase.
    */
+  @Deprecated
   public String getImplementationClassName() {
     return this.implementationClassName;
   }
@@ -151,7 +162,10 @@ public class BufferedMutatorParams implements Cloneable {
   /**
    * Specify a BufferedMutator implementation other than the default.
    * @param implementationClassName Name of the BufferedMutator implementation class
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. You can not set it any more as the
+   *             implementation has to use too many internal stuffs in HBase.
    */
+  @Deprecated
   public BufferedMutatorParams implementationClassName(String implementationClassName) {
     this.implementationClassName = implementationClassName;
     return this;
@@ -169,11 +183,6 @@ public class BufferedMutatorParams implements Cloneable {
     return this;
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see java.lang.Object#clone()
-   */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL",
     justification="The clone below is complete")
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
index dfe7d8f..8ec7ab8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
@@ -87,7 +87,24 @@ class ConnectionOverAsyncConnection implements Connection {
 
   @Override
   public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException {
-    return oldConn.getBufferedMutator(params);
+    AsyncBufferedMutatorBuilder builder = conn.getBufferedMutatorBuilder(params.getTableName());
+    if (params.getRpcTimeout() != BufferedMutatorParams.UNSET) {
+      builder.setRpcTimeout(params.getRpcTimeout(), TimeUnit.MILLISECONDS);
+    }
+    if (params.getOperationTimeout() != BufferedMutatorParams.UNSET) {
+      builder.setOperationTimeout(params.getOperationTimeout(), TimeUnit.MILLISECONDS);
+    }
+    if (params.getWriteBufferSize() != BufferedMutatorParams.UNSET) {
+      builder.setWriteBufferSize(params.getWriteBufferSize());
+    }
+    if (params.getWriteBufferPeriodicFlushTimeoutMs() != BufferedMutatorParams.UNSET) {
+      builder.setWriteBufferPeriodicFlush(params.getWriteBufferPeriodicFlushTimeoutMs(),
+        TimeUnit.MILLISECONDS);
+    }
+    if (params.getMaxKeyValueSize() != BufferedMutatorParams.UNSET) {
+      builder.setMaxKeyValueSize(params.getMaxKeyValueSize());
+    }
+    return new BufferedMutatorOverAsyncBufferedMutator(builder.build(), params.getListener());
   }
 
   @Override
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
deleted file mode 100644
index 96bb846..0000000
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-
-@Category({ SmallTests.class, ClientTests.class })
-public class TestBufferedMutator {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestBufferedMutator.class);
-
-  @Rule
-  public TestName name = new TestName();
-
-  /**
-   * My BufferedMutator. Just to prove that I can insert a BM other than default.
-   */
-  public static class MyBufferedMutator extends BufferedMutatorImpl {
-    MyBufferedMutator(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory,
-        RpcControllerFactory rpcFactory, BufferedMutatorParams params) {
-      super(conn, rpcCallerFactory, rpcFactory, params);
-    }
-  }
-
-  @Test
-  public void testAlternateBufferedMutatorImpl() throws IOException {
-    BufferedMutatorParams params =
-      new BufferedMutatorParams(TableName.valueOf(name.getMethodName()));
-    Configuration conf = HBaseConfiguration.create();
-    conf.set(AsyncRegistryFactory.REGISTRY_IMPL_CONF_KEY, DoNothingAsyncRegistry.class.getName());
-    try (ConnectionImplementation connection = ConnectionFactory.createConnectionImpl(conf, null,
-      UserProvider.instantiate(conf).getCurrent())) {
-      BufferedMutator bm = connection.getBufferedMutator(params);
-      // Assert we get default BM if nothing specified.
-      assertTrue(bm instanceof BufferedMutatorImpl);
-      // Now try and set my own BM implementation.
-      params.implementationClassName(MyBufferedMutator.class.getName());
-      bm = connection.getBufferedMutator(params);
-      assertTrue(bm instanceof MyBufferedMutator);
-    }
-    // Now try creating a Connection after setting an alterate BufferedMutator into
-    // the configuration and confirm we get what was expected.
-    conf.set(BufferedMutator.CLASSNAME_KEY, MyBufferedMutator.class.getName());
-    try (Connection connection = ConnectionFactory.createConnectionImpl(conf, null,
-      UserProvider.instantiate(conf).getCurrent())) {
-      BufferedMutator bm = connection.getBufferedMutator(params);
-      assertTrue(bm instanceof MyBufferedMutator);
-    }
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
new file mode 100644
index 0000000..23e69ee
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertArrayEquals;
+
+import java.io.IOException;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestBufferedMutator {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestBufferedMutator.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private static TableName TABLE_NAME = TableName.valueOf("test");
+
+  private static byte[] CF = Bytes.toBytes("cf");
+
+  private static byte[] CQ = Bytes.toBytes("cq");
+
+  private static int COUNT = 1024;
+
+  private static byte[] VALUE = new byte[1024];
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL.startMiniCluster(1);
+    TEST_UTIL.createTable(TABLE_NAME, CF);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void test() throws Exception {
+    try (BufferedMutator mutator = TEST_UTIL.getConnection().getBufferedMutator(TABLE_NAME)) {
+      mutator.mutate(IntStream.range(0, COUNT / 2)
+        .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE))
+        .collect(Collectors.toList()));
+      mutator.flush();
+      mutator.mutate(IntStream.range(COUNT / 2, COUNT)
+        .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE))
+        .collect(Collectors.toList()));
+      mutator.close();
+      verifyData();
+    }
+  }
+
+  private void verifyData() throws IOException {
+    try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
+      for (int i = 0; i < COUNT; i++) {
+        Result r = table.get(new Get(Bytes.toBytes(i)));
+        assertArrayEquals(VALUE, ((Result) r).getValue(CF, CQ));
+      }
+    }
+  }
+}
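
With BufferedMutator now implemented on top of AsyncBufferedMutator, the public usage
pattern for clients stays the same. A short sketch of typical use; the table name,
column family and buffer size are placeholders, not values taken from the patch:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorExample {

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("my_table"))
      .writeBufferSize(4 * 1024 * 1024) // flush once roughly 4MB of mutations are buffered
      .listener((e, mutator) -> {
        // invoked with the accumulated failures, as built by makeError() above
        System.err.println("Buffered mutations failed: " + e.getMessage());
      });
    try (Connection connection = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = connection.getBufferedMutator(params)) {
      for (int i = 0; i < 1000; i++) {
        mutator.mutate(new Put(Bytes.toBytes("row-" + i))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i)));
      }
      mutator.flush(); // force any remaining buffered mutations out before closing
    }
  }
}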


[hbase] 17/27: HBASE-22223 Implement RegionLocator based on AsyncTableRegionLocator

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit a21168ac576cb533878e209ec0337144bca32a63
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Apr 16 16:52:54 2019 +0800

    HBASE-22223 Implement RegionLocator based on AsyncTableRegionLocator
---
 .../hadoop/hbase/client/AsyncConnectionImpl.java   | 12 +---
 .../client/ConnectionOverAsyncConnection.java      | 16 ++---
 .../RegionLocatorOverAsyncTableRegionLocator.java  | 70 ++++++++++++++++++++++
 3 files changed, 76 insertions(+), 22 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 4a00412..84e1da6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -28,7 +28,6 @@ import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLE
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import java.io.IOException;
-import java.io.UncheckedIOException;
 import java.net.SocketAddress;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
@@ -208,7 +207,7 @@ class AsyncConnectionImpl implements AsyncConnection {
     metrics.ifPresent(MetricsConnection::shutdown);
     ConnectionOverAsyncConnection c = this.conn;
     if (c != null) {
-      c.closeConnImpl();
+      c.closePool();
     }
     closed = true;
   }
@@ -362,14 +361,7 @@ class AsyncConnectionImpl implements AsyncConnection {
       if (c != null) {
         return c;
       }
-      try {
-        c = new ConnectionOverAsyncConnection(this,
-          ConnectionFactory.createConnectionImpl(conf, null, user));
-      } catch (IOException e) {
-        // TODO: finally we will not rely on ConnectionImplementation anymore and there will no
-        // IOException here.
-        throw new UncheckedIOException(e);
-      }
+      c = new ConnectionOverAsyncConnection(this);
       this.conn = c;
     }
     return c;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
index 8ec7ab8..861aab0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
@@ -43,20 +43,12 @@ class ConnectionOverAsyncConnection implements Connection {
 
   private volatile ExecutorService batchPool = null;
 
-  protected final AsyncConnectionImpl conn;
-
-  /**
-   * @deprecated we can not implement all the related stuffs at once so keep it here for now, will
-   *             remove it after we implement all the stuffs, like Admin, RegionLocator, etc.
-   */
-  @Deprecated
-  private final ConnectionImplementation oldConn;
+  private final AsyncConnectionImpl conn;
 
   private final ConnectionConfiguration connConf;
 
-  ConnectionOverAsyncConnection(AsyncConnectionImpl conn, ConnectionImplementation oldConn) {
+  ConnectionOverAsyncConnection(AsyncConnectionImpl conn) {
     this.conn = conn;
-    this.oldConn = oldConn;
     this.connConf = new ConnectionConfiguration(conn.getConfiguration());
   }
 
@@ -109,7 +101,7 @@ class ConnectionOverAsyncConnection implements Connection {
 
   @Override
   public RegionLocator getRegionLocator(TableName tableName) throws IOException {
-    return oldConn.getRegionLocator(tableName);
+    return new RegionLocatorOverAsyncTableRegionLocator(conn.getRegionLocator(tableName));
   }
 
   @Override
@@ -129,7 +121,7 @@ class ConnectionOverAsyncConnection implements Connection {
 
   // will be called from AsyncConnection, to avoid infinite loop as in the above method we will call
   // AsyncConnection.close.
-  void closeConnImpl() {
+  void closePool() {
     ExecutorService batchPool = this.batchPool;
     if (batchPool != null) {
       ConnectionUtils.shutdownPool(batchPool);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocatorOverAsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocatorOverAsyncTableRegionLocator.java
new file mode 100644
index 0000000..5e21e3b
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocatorOverAsyncTableRegionLocator.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.util.FutureUtils.get;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The {@link RegionLocator} implementation based on {@link AsyncTableRegionLocator}.
+ */
+@InterfaceAudience.Private
+class RegionLocatorOverAsyncTableRegionLocator implements RegionLocator {
+
+  private final AsyncTableRegionLocator locator;
+
+  RegionLocatorOverAsyncTableRegionLocator(AsyncTableRegionLocator locator) {
+    this.locator = locator;
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public HRegionLocation getRegionLocation(byte[] row, int replicaId, boolean reload)
+      throws IOException {
+    return get(locator.getRegionLocation(row, replicaId, reload));
+  }
+
+  @Override
+  public List<HRegionLocation> getRegionLocations(byte[] row, boolean reload) throws IOException {
+    return get(locator.getRegionLocations(row, reload));
+  }
+
+  @Override
+  public void clearRegionLocationCache() {
+    locator.clearRegionLocationCache();
+  }
+
+  @Override
+  public List<HRegionLocation> getAllRegionLocations() throws IOException {
+    return get(locator.getAllRegionLocations());
+  }
+
+  @Override
+  public TableName getName() {
+    return locator.getName();
+  }
+
+}
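
The new RegionLocator is a thin blocking facade over AsyncTableRegionLocator, so callers
of the synchronous API keep working unchanged. A small usage sketch; the table name is a
placeholder:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocatorExample {

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(TableName.valueOf("my_table"))) {
      // Blocking call; with the change above it is served by AsyncTableRegionLocator underneath.
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getRegionNameAsString() + " -> "
          + location.getServerName() + ", startKey="
          + Bytes.toStringBinary(location.getRegion().getStartKey()));
      }
    }
  }
}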


[hbase] 07/27: HBASE-21537 Rewrite ServerManager.closeRegionSilentlyAndWait to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 1453314252f0b7992e7ca69b5be763cecb1ef8cb
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Jan 10 11:47:41 2019 +0800

    HBASE-21537 Rewrite ServerManager.closeRegionSilentlyAndWait to use AsyncClusterConnection
    
    Signed-off-by: Michael Stack <st...@apache.org>
---
 .../hadoop/hbase/master/MasterMetaBootstrap.java   |  2 +-
 .../apache/hadoop/hbase/master/ServerManager.java  | 41 +++++++++++++---------
 .../apache/hadoop/hbase/util/HBaseFsckRepair.java  | 22 +++++++-----
 3 files changed, 38 insertions(+), 27 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
index e57817e..6e38bdd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
@@ -101,7 +101,7 @@ class MasterMetaBootstrap {
           RegionState r = MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
           LOG.info("Closing excess replica of meta region " + r.getRegion());
           // send a close and wait for a max of 30 seconds
-          ServerManager.closeRegionSilentlyAndWait(master.getClusterConnection(),
+          ServerManager.closeRegionSilentlyAndWait(master.getAsyncClusterConnection(),
               r.getServerName(), r.getRegion(), 30000);
           ZKUtil.deleteNode(zooKeeper, zooKeeper.getZNodePaths().getZNodeForReplica(replicaId));
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index eb41144..6a121c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -48,13 +48,15 @@ import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -67,6 +69,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
@@ -660,35 +663,39 @@ public class ServerManager {
   }
 
   /**
-   * Contacts a region server and waits up to timeout ms
-   * to close the region.  This bypasses the active hmaster.
+   * Contacts a region server and waits up to timeout ms to close the region. This bypasses the
+   * active hmaster.
    */
-  public static void closeRegionSilentlyAndWait(ClusterConnection connection,
-    ServerName server, RegionInfo region, long timeout) throws IOException, InterruptedException {
-    AdminService.BlockingInterface rs = connection.getAdmin(server);
-    HBaseRpcController controller = connection.getRpcControllerFactory().newController();
+  public static void closeRegionSilentlyAndWait(AsyncClusterConnection connection,
+      ServerName server, RegionInfo region, long timeout) throws IOException, InterruptedException {
+    AsyncRegionServerAdmin admin = connection.getRegionServerAdmin(server);
     try {
-      ProtobufUtil.closeRegion(controller, rs, server, region.getRegionName());
+      FutureUtils.get(
+        admin.closeRegion(ProtobufUtil.buildCloseRegionRequest(server, region.getRegionName())));
     } catch (IOException e) {
       LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
     }
     long expiration = timeout + System.currentTimeMillis();
     while (System.currentTimeMillis() < expiration) {
-      controller.reset();
       try {
-        RegionInfo rsRegion =
-          ProtobufUtil.getRegionInfo(controller, rs, region.getRegionName());
-        if (rsRegion == null) return;
+        RegionInfo rsRegion = ProtobufUtil.toRegionInfo(FutureUtils
+          .get(
+            admin.getRegionInfo(RequestConverter.buildGetRegionInfoRequest(region.getRegionName())))
+          .getRegionInfo());
+        if (rsRegion == null) {
+          return;
+        }
       } catch (IOException ioe) {
-        if (ioe instanceof NotServingRegionException) // no need to retry again
+        if (ioe instanceof NotServingRegionException) {
+          // no need to retry again
           return;
-        LOG.warn("Exception when retrieving regioninfo from: "
-          + region.getRegionNameAsString(), ioe);
+        }
+        LOG.warn("Exception when retrieving regioninfo from: " + region.getRegionNameAsString(),
+          ioe);
       }
       Thread.sleep(1000);
     }
-    throw new IOException("Region " + region + " failed to close within"
-        + " timeout " + timeout);
+    throw new IOException("Region " + region + " failed to close within" + " timeout " + timeout);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index ec7f717..121d06c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -31,7 +31,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -143,16 +146,17 @@ public class HBaseFsckRepair {
   }
 
   /**
-   * Contacts a region server and waits up to hbase.hbck.close.timeout ms
-   * (default 120s) to close the region.  This bypasses the active hmaster.
+   * Contacts a region server and waits up to hbase.hbck.close.timeout ms (default 120s) to close
+   * the region. This bypasses the active hmaster.
    */
-  @SuppressWarnings("deprecation")
-  public static void closeRegionSilentlyAndWait(Connection connection,
-      ServerName server, RegionInfo region) throws IOException, InterruptedException {
-    long timeout = connection.getConfiguration()
-      .getLong("hbase.hbck.close.timeout", 120000);
-    ServerManager.closeRegionSilentlyAndWait((ClusterConnection)connection, server,
-         region, timeout);
+  public static void closeRegionSilentlyAndWait(Connection connection, ServerName server,
+      RegionInfo region) throws IOException, InterruptedException {
+    long timeout = connection.getConfiguration().getLong("hbase.hbck.close.timeout", 120000);
+    // this is a bit ugly but it is only used in the old hbck and tests, so I think it is fine.
+    try (AsyncClusterConnection asyncConn = ClusterConnectionFactory
+      .createAsyncClusterConnection(connection.getConfiguration(), null, User.getCurrent())) {
+      ServerManager.closeRegionSilentlyAndWait(asyncConn, server, region, timeout);
+    }
   }
 
   /**
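
Both call sites now bridge the async admin API back to blocking code with FutureUtils.get.
A simplified, generic sketch of what such a helper does, written against plain
java.util.concurrent only (this is not the actual FutureUtils implementation):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public final class SyncOverAsync {

  private SyncOverAsync() {
  }

  // Block on an async result and rethrow the failure cause as an IOException, so callers
  // written against a synchronous API (like closeRegionSilentlyAndWait) stay unchanged.
  public static <T> T getResult(CompletableFuture<T> future) throws IOException {
    try {
      return future.get();
    } catch (InterruptedException e) {
      InterruptedIOException ioe = new InterruptedIOException("Interrupted while waiting");
      ioe.initCause(e);
      throw ioe;
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException(cause);
    }
  }
}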


[hbase] 26/27: HBASE-22037 Re-enable TestAvoidCellReferencesIntoShippedBlocks

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit e71aa8bfbf98972037f91e8f79ab04f1ee63da70
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue May 21 17:23:37 2019 +0800

    HBASE-22037 Re-enable TestAvoidCellReferencesIntoShippedBlocks
---
 .../TestAvoidCellReferencesIntoShippedBlocks.java    | 20 +++++---------------
 1 file changed, 5 insertions(+), 15 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
index c8c8036..ec3d468 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -292,18 +291,11 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
     }
   }
 
-  /**
-   * TODO: not sure what do we test here but seems the test can not work together with async
-   * prefetch scanner. Ignore it for now, as after HBASE-21879 is landed we will have a more natural
-   * way to deal with reference counting...
-   */
-  @Ignore
   @Test
   public void testHBASE16372InReadPath() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     // Create a table with block size as 1024
-    final Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, null);
-    try {
+    try (Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, null)) {
       // get the block cache and region
       RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
       String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
@@ -370,7 +362,8 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
       // set partial as true so that the scan can send partial columns also
       s.setAllowPartialResults(true);
       s.setMaxResultSize(1000);
-      try (ResultScanner scanner = table.getScanner(s)) {
+      try (ScanPerNextResultScanner scanner =
+        new ScanPerNextResultScanner(TEST_UTIL.getAsyncConnection().getTable(tableName), s)) {
         Thread evictorThread = new Thread() {
           @Override
           public void run() {
@@ -402,9 +395,8 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
             s1.withStartRow(ROW3);
             s1.withStopRow(ROW5);
             s1.setCaching(1);
-            ResultScanner scanner;
-            try {
-              scanner = table.getScanner(s1);
+
+            try (ResultScanner scanner = table.getScanner(s1)) {
               int count = Iterables.size(scanner);
               assertEquals("Count the rows", 2, count);
               int newBlockRefCount = 0;
@@ -442,8 +434,6 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
         }
       }
       assertEquals("Count should give all rows ", 10, count);
-    } finally {
-      table.close();
     }
   }
 }
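
Besides re-enabling the test, the patch converts the manual close() in a finally block to
try-with-resources. The same shape in isolation, for a plain scan; apart from the
placeholder table name this is standard client API:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanWithTryWithResources {

  // Counts the rows of a table; both Table and ResultScanner are closed automatically,
  // which is the shape the test above was converted to.
  static long countRows(Connection connection, TableName tableName) throws IOException {
    long count = 0;
    try (Table table = connection.getTable(tableName);
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        count++;
      }
    }
    return count;
  }
}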


[hbase] 08/27: HBASE-21719 Rewrite RegionPlacementMaintainer to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 3daf1ac6ca58790f492299879b9d2e56d8e4a6f4
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Jan 15 11:43:41 2019 +0800

    HBASE-21719 Rewrite RegionPlacementMaintainer to use AsyncClusterConnection
    
    Signed-off-by: Michael Stack <st...@apache.org>
---
 .../hbase/master/RegionPlacementMaintainer.java    | 225 +++++++++++----------
 1 file changed, 113 insertions(+), 112 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index faf5e4a..fda0a9c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -1,5 +1,4 @@
 /**
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,9 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.text.DecimalFormat;
 import java.util.ArrayList;
@@ -39,29 +38,30 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.MunkresAssignment;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
 
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
 
@@ -71,7 +71,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavor
  */
 @InterfaceAudience.Private
 // TODO: Remove? Unused. Partially implemented only.
-public class RegionPlacementMaintainer {
+public class RegionPlacementMaintainer implements Closeable {
   private static final Logger LOG = LoggerFactory.getLogger(RegionPlacementMaintainer.class
       .getName());
   //The cost of a placement that should never be assigned.
@@ -96,9 +96,9 @@ public class RegionPlacementMaintainer {
   private final boolean enforceMinAssignmentMove;
   private RackManager rackManager;
   private Set<TableName> targetTableSet;
-  private final Connection connection;
+  private AsyncClusterConnection connection;
 
-  public RegionPlacementMaintainer(Configuration conf) {
+  public RegionPlacementMaintainer(Configuration conf) throws IOException {
     this(conf, true, true);
   }
 
@@ -109,11 +109,6 @@ public class RegionPlacementMaintainer {
     this.enforceMinAssignmentMove = enforceMinAssignmentMove;
     this.targetTableSet = new HashSet<>();
     this.rackManager = new RackManager(conf);
-    try {
-      this.connection = ConnectionFactory.createConnection(this.conf);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
   }
 
   private static void printHelp(Options opt) {
@@ -124,6 +119,14 @@ public class RegionPlacementMaintainer {
         " [-fs hdfs://a.b.c.d:9000] [-hbase_root /HBASE]", opt);
   }
 
+  private AsyncClusterConnection getConnection() throws IOException {
+    if (connection == null) {
+      connection =
+        ClusterConnectionFactory.createAsyncClusterConnection(this.conf, null, User.getCurrent());
+    }
+    return connection;
+  }
+
   public void setTargetTableName(String[] tableNames) {
     if (tableNames != null) {
       for (String table : tableNames)
@@ -133,10 +136,8 @@ public class RegionPlacementMaintainer {
 
   /**
    * @return the new RegionAssignmentSnapshot
-   * @throws IOException
    */
-  public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot()
-  throws IOException {
+  public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot() throws IOException {
     SnapshotOfRegionAssignmentFromMeta currentAssignmentShapshot =
       new SnapshotOfRegionAssignmentFromMeta(ConnectionFactory.createConnection(conf));
     currentAssignmentShapshot.initialize();
@@ -145,9 +146,6 @@ public class RegionPlacementMaintainer {
 
   /**
    * Verify the region placement is consistent with the assignment plan
-   * @param isDetailMode
-   * @return reports
-   * @throws IOException
    */
   public List<AssignmentVerificationReport> verifyRegionPlacement(boolean isDetailMode)
       throws IOException {
@@ -206,10 +204,9 @@ public class RegionPlacementMaintainer {
 
     // Get the all the region servers
     List<ServerName> servers = new ArrayList<>();
-    try (Admin admin = this.connection.getAdmin()) {
-      servers.addAll(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+    servers.addAll(
+      FutureUtils.get(getConnection().getAdmin().getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)))
         .getLiveServerMetrics().keySet());
-    }
 
     LOG.info("Start to generate assignment plan for " + numRegions +
         " regions from table " + tableName + " with " +
@@ -492,6 +489,11 @@ public class RegionPlacementMaintainer {
     return plan;
   }
 
+  @Override
+  public void close() throws IOException {
+    Closeables.close(connection, true);
+  }
+
   /**
    * Some algorithms for solving the assignment problem may traverse workers or
    * jobs in linear order which may result in skewing the assignments of the
@@ -690,19 +692,17 @@ public class RegionPlacementMaintainer {
         }
         if (singleServerPlan != null) {
           // Update the current region server with its updated favored nodes
-          BlockingInterface currentRegionServer =
-            ((ClusterConnection)this.connection).getAdmin(entry.getKey());
+          AsyncRegionServerAdmin rsAdmin = getConnection().getRegionServerAdmin(entry.getKey());
           UpdateFavoredNodesRequest request =
-              RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
-
+            RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
           UpdateFavoredNodesResponse updateFavoredNodesResponse =
-              currentRegionServer.updateFavoredNodes(null, request);
+            FutureUtils.get(rsAdmin.updateFavoredNodes(request));
           LOG.info("Region server " +
-              ProtobufUtil.getServerInfo(null, currentRegionServer).getServerName() +
-              " has updated " + updateFavoredNodesResponse.getResponse() + " / " +
-              singleServerPlan.getAssignmentMap().size() +
-              " regions with the assignment plan");
-          succeededNum ++;
+            FutureUtils.get(rsAdmin.getServerInfo(RequestConverter.buildGetServerInfoRequest()))
+              .getServerInfo() +
+            " has updated " + updateFavoredNodesResponse.getResponse() + " / " +
+            singleServerPlan.getAssignmentMap().size() + " regions with the assignment plan");
+          succeededNum++;
         }
       } catch (Exception e) {
         failedUpdateMap.put(entry.getKey(), e);
@@ -719,7 +719,7 @@ public class RegionPlacementMaintainer {
           " region servers with its corresponding favored nodes");
       for (Map.Entry<ServerName, Exception> entry :
         failedUpdateMap.entrySet() ) {
-        LOG.error("Failed to update " + entry.getKey().getHostAndPort() +
+        LOG.error("Failed to update " + entry.getKey().getAddress() +
             " because of " + entry.getValue().getMessage());
       }
     }
@@ -1019,93 +1019,94 @@ public class RegionPlacementMaintainer {
       }
 
       // Create the region placement obj
-      RegionPlacementMaintainer rp = new RegionPlacementMaintainer(conf, enforceLocality,
-          enforceMinAssignmentMove);
+      try (RegionPlacementMaintainer rp =
+        new RegionPlacementMaintainer(conf, enforceLocality, enforceMinAssignmentMove)) {
 
-      if (cmd.hasOption("d") || cmd.hasOption("verification-details")) {
-        verificationDetails = true;
-      }
-
-      if (cmd.hasOption("tables")) {
-        String tableNameListStr = cmd.getOptionValue("tables");
-        String[] tableNames = StringUtils.split(tableNameListStr, ",");
-        rp.setTargetTableName(tableNames);
-      }
+        if (cmd.hasOption("d") || cmd.hasOption("verification-details")) {
+          verificationDetails = true;
+        }
 
-      if (cmd.hasOption("munkres")) {
-        USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = true;
-      }
+        if (cmd.hasOption("tables")) {
+          String tableNameListStr = cmd.getOptionValue("tables");
+          String[] tableNames = StringUtils.split(tableNameListStr, ",");
+          rp.setTargetTableName(tableNames);
+        }
 
-      // Read all the modes
-      if (cmd.hasOption("v") || cmd.hasOption("verify")) {
-        // Verify the region placement.
-        rp.verifyRegionPlacement(verificationDetails);
-      } else if (cmd.hasOption("n") || cmd.hasOption("dry-run")) {
-        // Generate the assignment plan only without updating the hbase:meta and RS
-        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
-        printAssignmentPlan(plan);
-      } else if (cmd.hasOption("w") || cmd.hasOption("write")) {
-        // Generate the new assignment plan
-        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
-        // Print the new assignment plan
-        printAssignmentPlan(plan);
-        // Write the new assignment plan to META
-        rp.updateAssignmentPlanToMeta(plan);
-      } else if (cmd.hasOption("u") || cmd.hasOption("update")) {
-        // Generate the new assignment plan
-        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
-        // Print the new assignment plan
-        printAssignmentPlan(plan);
-        // Update the assignment to hbase:meta and Region Servers
-        rp.updateAssignmentPlan(plan);
-      } else if (cmd.hasOption("diff")) {
-        FavoredNodesPlan newPlan = rp.getNewAssignmentPlan();
-        Map<String, Map<String, Float>> locality = FSUtils
-            .getRegionDegreeLocalityMappingFromFS(conf);
-        Map<TableName, Integer> movesPerTable = rp.getRegionsMovement(newPlan);
-        rp.checkDifferencesWithOldPlan(movesPerTable, locality, newPlan);
-        System.out.println("Do you want to update the assignment plan? [y/n]");
-        Scanner s = new Scanner(System.in);
-        String input = s.nextLine().trim();
-        if (input.equals("y")) {
-          System.out.println("Updating assignment plan...");
-          rp.updateAssignmentPlan(newPlan);
+        if (cmd.hasOption("munkres")) {
+          USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = true;
         }
-        s.close();
-      } else if (cmd.hasOption("ld")) {
-        Map<String, Map<String, Float>> locality = FSUtils
-            .getRegionDegreeLocalityMappingFromFS(conf);
-        rp.printLocalityAndDispersionForCurrentPlan(locality);
-      } else if (cmd.hasOption("p") || cmd.hasOption("print")) {
-        FavoredNodesPlan plan = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan();
-        printAssignmentPlan(plan);
-      } else if (cmd.hasOption("overwrite")) {
-        if (!cmd.hasOption("f") || !cmd.hasOption("r")) {
-          throw new IllegalArgumentException("Please specify: " +
+
+        // Read all the modes
+        if (cmd.hasOption("v") || cmd.hasOption("verify")) {
+          // Verify the region placement.
+          rp.verifyRegionPlacement(verificationDetails);
+        } else if (cmd.hasOption("n") || cmd.hasOption("dry-run")) {
+          // Generate the assignment plan only without updating the hbase:meta and RS
+          FavoredNodesPlan plan = rp.getNewAssignmentPlan();
+          printAssignmentPlan(plan);
+        } else if (cmd.hasOption("w") || cmd.hasOption("write")) {
+          // Generate the new assignment plan
+          FavoredNodesPlan plan = rp.getNewAssignmentPlan();
+          // Print the new assignment plan
+          printAssignmentPlan(plan);
+          // Write the new assignment plan to META
+          rp.updateAssignmentPlanToMeta(plan);
+        } else if (cmd.hasOption("u") || cmd.hasOption("update")) {
+          // Generate the new assignment plan
+          FavoredNodesPlan plan = rp.getNewAssignmentPlan();
+          // Print the new assignment plan
+          printAssignmentPlan(plan);
+          // Update the assignment to hbase:meta and Region Servers
+          rp.updateAssignmentPlan(plan);
+        } else if (cmd.hasOption("diff")) {
+          FavoredNodesPlan newPlan = rp.getNewAssignmentPlan();
+          Map<String, Map<String, Float>> locality =
+            FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
+          Map<TableName, Integer> movesPerTable = rp.getRegionsMovement(newPlan);
+          rp.checkDifferencesWithOldPlan(movesPerTable, locality, newPlan);
+          System.out.println("Do you want to update the assignment plan? [y/n]");
+          Scanner s = new Scanner(System.in);
+          String input = s.nextLine().trim();
+          if (input.equals("y")) {
+            System.out.println("Updating assignment plan...");
+            rp.updateAssignmentPlan(newPlan);
+          }
+          s.close();
+        } else if (cmd.hasOption("ld")) {
+          Map<String, Map<String, Float>> locality =
+            FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
+          rp.printLocalityAndDispersionForCurrentPlan(locality);
+        } else if (cmd.hasOption("p") || cmd.hasOption("print")) {
+          FavoredNodesPlan plan = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan();
+          printAssignmentPlan(plan);
+        } else if (cmd.hasOption("overwrite")) {
+          if (!cmd.hasOption("f") || !cmd.hasOption("r")) {
+            throw new IllegalArgumentException("Please specify: " +
               " -update -r regionName -f server1:port,server2:port,server3:port");
-        }
+          }
 
-        String regionName = cmd.getOptionValue("r");
-        String favoredNodesStr = cmd.getOptionValue("f");
-        LOG.info("Going to update the region " + regionName + " with the new favored nodes " +
+          String regionName = cmd.getOptionValue("r");
+          String favoredNodesStr = cmd.getOptionValue("f");
+          LOG.info("Going to update the region " + regionName + " with the new favored nodes " +
             favoredNodesStr);
-        List<ServerName> favoredNodes = null;
-        RegionInfo regionInfo =
+          List<ServerName> favoredNodes = null;
+          RegionInfo regionInfo =
             rp.getRegionAssignmentSnapshot().getRegionNameToRegionInfoMap().get(regionName);
-        if (regionInfo == null) {
-          LOG.error("Cannot find the region " + regionName + " from the META");
-        } else {
-          try {
-            favoredNodes = getFavoredNodeList(favoredNodesStr);
-          } catch (IllegalArgumentException e) {
-            LOG.error("Cannot parse the invalid favored nodes because " + e);
+          if (regionInfo == null) {
+            LOG.error("Cannot find the region " + regionName + " from the META");
+          } else {
+            try {
+              favoredNodes = getFavoredNodeList(favoredNodesStr);
+            } catch (IllegalArgumentException e) {
+              LOG.error("Cannot parse the invalid favored nodes because " + e);
+            }
+            FavoredNodesPlan newPlan = new FavoredNodesPlan();
+            newPlan.updateFavoredNodesMap(regionInfo, favoredNodes);
+            rp.updateAssignmentPlan(newPlan);
           }
-          FavoredNodesPlan newPlan = new FavoredNodesPlan();
-          newPlan.updateFavoredNodesMap(regionInfo, favoredNodes);
-          rp.updateAssignmentPlan(newPlan);
+        } else {
+          printHelp(opt);
         }
-      } else {
-        printHelp(opt);
       }
     } catch (ParseException e) {
       printHelp(opt);


[hbase] 19/27: HBASE-22295 Fix TestClientOperationTimeout

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 29fb885822edda7baa3d5d5df9f49694283b233d
Author: zhangduo <zh...@apache.org>
AuthorDate: Tue Apr 23 21:54:31 2019 +0800

    HBASE-22295 Fix TestClientOperationTimeout
    
    Signed-off-by: Michael Stack <st...@apache.org>
---
 .../test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java
index 2ce34a9..52f0c7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientOperationTimeout.java
@@ -120,7 +120,7 @@ public class TestClientOperationTimeout {
    * Tests that a get on a table throws {@link SocketTimeoutException} when the operation takes
    * longer than 'hbase.client.operation.timeout'.
    */
-  @Test(expected = SocketTimeoutException.class)
+  @Test(expected = RetriesExhaustedException.class)
   public void testGetTimeout() throws Exception {
     DELAY_GET = 600;
     TABLE.get(new Get(ROW));
@@ -130,7 +130,7 @@ public class TestClientOperationTimeout {
    * Tests that a put on a table throws {@link SocketTimeoutException} when the operation takes
    * longer than 'hbase.client.operation.timeout'.
    */
-  @Test(expected = SocketTimeoutException.class)
+  @Test(expected = RetriesExhaustedException.class)
   public void testPutTimeout() throws Exception {
     DELAY_MUTATE = 600;
 

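As background for this change, the sketch below (a hypothetical JUnit 4 test, not part of the commit; the table setup is assumed) shows the pattern the patch moves the tests to: with the async-connection-based client, an operation that runs past 'hbase.client.operation.timeout' now surfaces as RetriesExhaustedException rather than SocketTimeoutException.

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;

public class OperationTimeoutSketch {

  // Assumed to be connected to a cluster whose region server delays Gets
  // beyond the configured 'hbase.client.operation.timeout'.
  private Table table;

  @Test(expected = RetriesExhaustedException.class)
  public void getGivesUpAfterOperationTimeout() throws Exception {
    // The client retries until the operation timeout elapses, then fails
    // with RetriesExhaustedException instead of a raw SocketTimeoutException.
    table.get(new Get(Bytes.toBytes("row")));
  }
}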

[hbase] 25/27: HBASE-22351 Increase the wait time when creating table for TestProcedurePriority

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 68a59bac4a7f29a0e0906117bd4c427cad128281
Author: zhangduo <zh...@apache.org>
AuthorDate: Thu May 2 21:09:26 2019 +0800

    HBASE-22351 Increase the wait time when creating table for TestProcedurePriority
---
 .../apache/hadoop/hbase/master/procedure/TestProcedurePriority.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
index 1cfe17b..36f31e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
@@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -107,6 +108,7 @@ public class TestProcedurePriority {
     UTIL.getConfiguration().setLong(ProcedureExecutor.WORKER_KEEP_ALIVE_TIME_CONF_KEY, 5000);
     UTIL.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 4);
     UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MyCP.class.getName());
+    UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 100);
     UTIL.startMiniCluster(3);
     CORE_POOL_SIZE =
       UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getCorePoolSize();
@@ -118,7 +120,7 @@ public class TestProcedurePriority {
           .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build()));
     }
     for (Future<?> future : futures) {
-      future.get(1, TimeUnit.MINUTES);
+      future.get(3, TimeUnit.MINUTES);
     }
     UTIL.getAdmin().balance(true);
     UTIL.waitUntilNoRegionsInTransition();
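
A rough sketch of the two knobs this patch relies on, written as a hypothetical helper rather than the committed test: raise the region server handler count before the mini cluster starts, and give each asynchronous table creation a longer bound to complete.

import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;

public class ProcedurePrioritySetupSketch {

  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  static void setUp(List<Future<?>> tableCreations) throws Exception {
    // More handlers so that a flood of concurrent create-table procedures does
    // not starve the region servers (the value 100 mirrors the patch).
    UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 100);
    UTIL.startMiniCluster(3);
    // Wait up to three minutes per creation instead of one; creating many
    // tables concurrently on a 3-node mini cluster can legitimately be slow.
    for (Future<?> future : tableCreations) {
      future.get(3, TimeUnit.MINUTES);
    }
  }
}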


[hbase] 05/27: HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 2a942705b9bd382e0eb13fbf7e9327ca5805b2d2
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Wed Dec 12 09:33:33 2018 +0800

    HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection
---
 .../hbase/client/AsyncClusterConnection.java       |   8 ++
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |   8 ++
 .../hbase/client/ClusterConnectionFactory.java     |  16 +--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  36 ++++---
 .../hbase/protobuf/ReplicationProtbufUtil.java     |  15 +--
 .../hadoop/hbase/regionserver/HRegionServer.java   |   3 +-
 .../handler/RegionReplicaFlushHandler.java         | 110 ++++++++++++---------
 7 files changed, 106 insertions(+), 90 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index 1327fd7..f1f64ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+
 /**
  * The asynchronous connection for internal usage.
  */
@@ -41,4 +44,9 @@ public interface AsyncClusterConnection extends AsyncConnection {
    * Get the rpc client we used to communicate with other servers.
    */
   RpcClient getRpcClient();
+
+  /**
+   * Flush a region and get the response.
+   */
+  CompletableFuture<FlushRegionResponse> flush(byte[] regionName, boolean writeFlushWALMarker);
 }
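
A small usage sketch of the new interface method (the caller and variables below are hypothetical, not part of the commit): the returned CompletableFuture can be chained directly, and passing writeFlushWALMarker=true asks the primary to at least write a flush WAL marker when its memstore is empty.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;

final class FlushUsageSketch {
  // 'regionName' is assumed to be the region name bytes of the primary replica.
  static void triggerFlush(AsyncClusterConnection conn, byte[] regionName) {
    CompletableFuture<FlushRegionResponse> f = conn.flush(regionName, true);
    f.thenAccept(resp -> {
      if (resp.getFlushed()) {
        // a real flush was started on the primary
      } else if (resp.hasWroteFlushWalMarker() && resp.getWroteFlushWalMarker()) {
        // memstore was empty, but a flush WAL marker was written so replicas can catch up
      }
    }).exceptionally(err -> {
      // surface or retry as the caller sees fit
      return null;
    });
  }
}
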
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 62b9d8b..c17cca9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -60,6 +60,7 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
@@ -384,4 +385,11 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
   public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
     return new AsyncRegionServerAdmin(serverName, this);
   }
+
+  @Override
+  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
+      boolean writeFlushWALMarker) {
+    RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin();
+    return admin.flushRegionInternal(regionName, writeFlushWALMarker);
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
index 68c0630..79484db 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -18,15 +18,12 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.SocketAddress;
-import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-
 /**
  * The factory for creating {@link AsyncClusterConnection}.
  */
@@ -48,16 +45,7 @@ public final class ClusterConnectionFactory {
   public static AsyncClusterConnection createAsyncClusterConnection(Configuration conf,
       SocketAddress localAddress, User user) throws IOException {
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(conf);
-    String clusterId;
-    try {
-      clusterId = registry.getClusterId().get();
-    } catch (InterruptedException e) {
-      throw (IOException) new InterruptedIOException().initCause(e);
-    } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      Throwables.propagateIfPossible(cause, IOException.class);
-      throw new IOException(cause);
-    }
+    String clusterId = FutureUtils.get(registry.getClusterId());
     return new AsyncConnectionImpl(conf, registry, clusterId, localAddress, user);
   }
 }
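
The try/catch block removed above is the boilerplate that FutureUtils.get now hides. A sketch of what such a helper does, written here for illustration rather than copied from the utility class: block on the future and rethrow failures as IOException.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

final class FutureGetSketch {
  // Block on a future; translate interruption and execution failures into
  // IOExceptions, which is what synchronous HBase code expects.
  static <T> T get(CompletableFuture<T> future) throws IOException {
    try {
      return future.get();
    } catch (InterruptedException e) {
      throw (IOException) new InterruptedIOException().initCause(e);
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException(cause);
    }
  }
}
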
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 3303fd3..b3d3468 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -910,7 +910,19 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<Void> flushRegion(byte[] regionName) {
-    CompletableFuture<Void> future = new CompletableFuture<>();
+    return flushRegionInternal(regionName, false).thenAccept(r -> {
+    });
+  }
+
+  /**
+   * This method is for internal use only, where we need the response of the flush.
+   * <p/>
+   * As it exposes the protobuf message, please do <strong>NOT</strong> try to expose it as a public
+   * API.
+   */
+  CompletableFuture<FlushRegionResponse> flushRegionInternal(byte[] regionName,
+      boolean writeFlushWALMarker) {
+    CompletableFuture<FlushRegionResponse> future = new CompletableFuture<>();
     addListener(getRegionLocation(regionName), (location, err) -> {
       if (err != null) {
         future.completeExceptionally(err);
@@ -922,7 +934,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
           .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
         return;
       }
-      addListener(flush(serverName, location.getRegion()), (ret, err2) -> {
+      addListener(flush(serverName, location.getRegion(), writeFlushWALMarker), (ret, err2) -> {
         if (err2 != null) {
           future.completeExceptionally(err2);
         } else {
@@ -933,15 +945,14 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
     return future;
   }
 
-  private CompletableFuture<Void> flush(final ServerName serverName, final RegionInfo regionInfo) {
-    return this.<Void> newAdminCaller()
-            .serverName(serverName)
-            .action(
-              (controller, stub) -> this.<FlushRegionRequest, FlushRegionResponse, Void> adminCall(
-                controller, stub, RequestConverter.buildFlushRegionRequest(regionInfo
-                  .getRegionName()), (s, c, req, done) -> s.flushRegion(c, req, done),
-                resp -> null))
-            .call();
+  private CompletableFuture<FlushRegionResponse> flush(ServerName serverName, RegionInfo regionInfo,
+      boolean writeFlushWALMarker) {
+    return this.<FlushRegionResponse> newAdminCaller().serverName(serverName)
+      .action((controller, stub) -> this
+        .<FlushRegionRequest, FlushRegionResponse, FlushRegionResponse> adminCall(controller, stub,
+          RequestConverter.buildFlushRegionRequest(regionInfo.getRegionName(), writeFlushWALMarker),
+          (s, c, req, done) -> s.flushRegion(c, req, done), resp -> resp))
+      .call();
   }
 
   @Override
@@ -954,7 +965,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       }
       List<CompletableFuture<Void>> compactFutures = new ArrayList<>();
       if (hRegionInfos != null) {
-        hRegionInfos.forEach(region -> compactFutures.add(flush(sn, region)));
+        hRegionInfos.forEach(region -> compactFutures.add(flush(sn, region, false).thenAccept(r -> {
+        })));
       }
       addListener(CompletableFuture.allOf(
         compactFutures.toArray(new CompletableFuture<?>[compactFutures.size()])), (ret, err2) -> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 74fad26..9f41a76 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -18,13 +18,10 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
-
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
@@ -32,12 +29,12 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
@@ -60,15 +57,7 @@ public class ReplicationProtbufUtil {
       throws IOException {
     Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(
       entries, null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
-    try {
-      admin.replicateWALEntry(p.getFirst(), p.getSecond()).get();
-    } catch (InterruptedException e) {
-      throw (IOException) new InterruptedIOException().initCause(e);
-    } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      Throwables.propagateIfPossible(cause, IOException.class);
-      throw new IOException(e);
-    }
+    FutureUtils.get(admin.replicateWALEntry(p.getFirst(), p.getSecond()));
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 9dce52a..7c9141e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2409,8 +2409,7 @@ public class HRegionServer extends HasThread implements
 
     // submit it to be handled by one of the handlers so that we do not block OpenRegionHandler
     if (this.executorService != null) {
-      this.executorService.submit(new RegionReplicaFlushHandler(this, clusterConnection,
-          rpcRetryingCallerFactory, rpcControllerFactory, operationTimeout, region));
+      this.executorService.submit(new RegionReplicaFlushHandler(this, region));
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index 81b6d7e..0729203 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -20,26 +20,23 @@ package org.apache.hadoop.hbase.regionserver.handler;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.FlushRegionCallable;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 
 /**
  * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to wal in
@@ -56,20 +53,13 @@ public class RegionReplicaFlushHandler extends EventHandler {
 
   private static final Logger LOG = LoggerFactory.getLogger(RegionReplicaFlushHandler.class);
 
-  private final ClusterConnection connection;
-  private final RpcRetryingCallerFactory rpcRetryingCallerFactory;
-  private final RpcControllerFactory rpcControllerFactory;
-  private final int operationTimeout;
+  private final AsyncClusterConnection connection;
+
   private final HRegion region;
 
-  public RegionReplicaFlushHandler(Server server, ClusterConnection connection,
-      RpcRetryingCallerFactory rpcRetryingCallerFactory, RpcControllerFactory rpcControllerFactory,
-      int operationTimeout, HRegion region) {
+  public RegionReplicaFlushHandler(Server server, HRegion region) {
     super(server, EventType.RS_REGION_REPLICA_FLUSH);
-    this.connection = connection;
-    this.rpcRetryingCallerFactory = rpcRetryingCallerFactory;
-    this.rpcControllerFactory = rpcControllerFactory;
-    this.operationTimeout = operationTimeout;
+    this.connection = server.getAsyncClusterConnection();
     this.region = region;
   }
 
@@ -103,7 +93,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
     return numRetries;
   }
 
-  void triggerFlushInPrimaryRegion(final HRegion region) throws IOException, RuntimeException {
+  void triggerFlushInPrimaryRegion(final HRegion region) throws IOException {
     long pause = connection.getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
       HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
 
@@ -117,45 +107,59 @@ public class RegionReplicaFlushHandler extends EventHandler {
     }
     while (!region.isClosing() && !region.isClosed()
         && !server.isAborted() && !server.isStopped()) {
-      FlushRegionCallable flushCallable = new FlushRegionCallable(
-        connection, rpcControllerFactory,
-        RegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()), true);
-
       // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we
       // do not have to wait for the whole flush here, just initiate it.
-      FlushRegionResponse response = null;
+      FlushRegionResponse response;
       try {
-         response = rpcRetryingCallerFactory.<FlushRegionResponse>newCaller()
-          .callWithRetries(flushCallable, this.operationTimeout);
-      } catch (IOException ex) {
-        if (ex instanceof TableNotFoundException
-            || connection.isTableDisabled(region.getRegionInfo().getTable())) {
+        response = FutureUtils.get(connection.flush(ServerRegionReplicaUtil
+          .getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionName(), true));
+      } catch (IOException e) {
+        if (e instanceof TableNotFoundException || FutureUtils
+          .get(connection.getAdmin().isTableDisabled(region.getRegionInfo().getTable()))) {
           return;
         }
-        throw ex;
+        if (!counter.shouldRetry()) {
+          throw e;
+        }
+        // The reason we need to retry here is that the retry logic for asynchronous admin
+        // requests is much simpler than for normal operations: if we fail to locate the region
+        // once, the exception is thrown out and no relocation is attempted. So we add some
+        // retries ourselves here to avoid shutting down the region server too
+        // frequently.
+        LOG.debug("Failed to trigger a flush of primary region replica {} of region {}, retry={}",
+          ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
+            .getRegionNameAsString(),
+          region.getRegionInfo().getRegionNameAsString(), counter.getAttemptTimes(), e);
+        try {
+          counter.sleepUntilNextRetry();
+        } catch (InterruptedException e1) {
+          throw new InterruptedIOException(e1.getMessage());
+        }
+        continue;
       }
 
       if (response.getFlushed()) {
         // then we have to wait for seeing the flush entry. All reads will be rejected until we see
         // a complete flush cycle or replay a region open event
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Successfully triggered a flush of primary region replica "
-              + ServerRegionReplicaUtil
-                .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName()
-                + " of region " + region.getRegionInfo().getEncodedName()
-                + " Now waiting and blocking reads until observing a full flush cycle");
+          LOG.debug("Successfully triggered a flush of primary region replica " +
+            ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
+              .getRegionNameAsString() +
+            " of region " + region.getRegionInfo().getRegionNameAsString() +
+            " Now waiting and blocking reads until observing a full flush cycle");
         }
         region.setReadsEnabled(true);
         break;
       } else {
         if (response.hasWroteFlushWalMarker()) {
-          if(response.getWroteFlushWalMarker()) {
+          if (response.getWroteFlushWalMarker()) {
             if (LOG.isDebugEnabled()) {
-              LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary "
-                  + "region replica " + ServerRegionReplicaUtil
-                    .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName()
-                  + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
-                  + "blocking reads until observing a flush marker");
+              LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary " +
+                "region replica " +
+                ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
+                  .getRegionNameAsString() +
+                " of region " + region.getRegionInfo().getRegionNameAsString() +
+                " Now waiting and " + "blocking reads until observing a flush marker");
             }
             region.setReadsEnabled(true);
             break;
@@ -164,15 +168,23 @@ public class RegionReplicaFlushHandler extends EventHandler {
             // closing or already flushing. Retry flush again after some sleep.
             if (!counter.shouldRetry()) {
               throw new IOException("Cannot cause primary to flush or drop a wal marker after " +
-                  "retries. Failing opening of this region replica "
-                  + region.getRegionInfo().getEncodedName());
+                counter.getAttemptTimes() + " retries. Failing opening of this region replica " +
+                region.getRegionInfo().getRegionNameAsString());
+            } else {
+              LOG.warn(
+                "Cannot cause primary replica {} to flush or drop a wal marker " +
+                  "for region replica {}, retry={}",
+                ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
+                  .getRegionNameAsString(),
+                region.getRegionInfo().getRegionNameAsString(), counter.getAttemptTimes());
             }
           }
         } else {
           // nothing to do. Are we dealing with an old server?
-          LOG.warn("Was not able to trigger a flush from primary region due to old server version? "
-              + "Continuing to open the secondary region replica: "
-              + region.getRegionInfo().getEncodedName());
+          LOG.warn(
+            "Was not able to trigger a flush from primary region due to old server version? " +
+              "Continuing to open the secondary region replica: " +
+              region.getRegionInfo().getRegionNameAsString());
           region.setReadsEnabled(true);
           break;
         }
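
The retry added in the catch block of this handler is the substantive part of the change: the asynchronous admin call does not relocate the region after a failure, so the handler drives its own retries with a RetryCounter. Stripped of the flush-specific details, the shape is roughly as in this sketch (helper names are made up):

import java.io.IOException;
import java.io.InterruptedIOException;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;

final class FlushRetrySketch {
  interface OneShotCall<T> {
    T call() throws IOException;   // a single, non-retrying async-admin call
  }

  // Retry a call that does not retry internally, mirroring the loop above.
  static <T> T callWithRetries(OneShotCall<T> call, RetryCounterFactory factory)
      throws IOException {
    RetryCounter counter = factory.create();
    for (;;) {
      try {
        return call.call();
      } catch (IOException e) {
        if (!counter.shouldRetry()) {
          throw e;                       // give up after the configured attempts
        }
        try {
          counter.sleepUntilNextRetry(); // back off before the next attempt
        } catch (InterruptedException ie) {
          throw new InterruptedIOException(ie.getMessage());
        }
      }
    }
  }
}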


[hbase] 14/27: HBASE-22241 Fix TestRegionServerCoprocessorEndpoint

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 060f98fefe24f632330b19906e3ac38b49a6e2d2
Author: zhangduo <zh...@apache.org>
AuthorDate: Sun Apr 14 23:09:49 2019 +0800

    HBASE-22241 Fix TestRegionServerCoprocessorEndpoint
---
 .../hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java
index f180884..6d93ffc 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerE
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyResponse;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyService;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
@@ -101,8 +100,7 @@ public class TestRegionServerCoprocessorEndpoint {
         DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback);
     assertEquals(null, rpcCallback.get());
     assertTrue(controller.failedOnException());
-    assertEquals(WHAT_TO_THROW.getClass().getName().trim(),
-        ((RemoteWithExtrasException) controller.getFailedOn().getCause()).getClassName().trim());
+    assertEquals(WHAT_TO_THROW.getClass(), controller.getFailedOn().getCause().getClass());
   }
 
   public static class DummyRegionServerEndpoint extends DummyService


[hbase] 13/27: HBASE-21718 Implement Admin based on AsyncAdmin

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9df28d8a638b464d48ad9edc9533cb3b900abb6d
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Fri Apr 12 15:08:11 2019 +0800

    HBASE-21718 Implement Admin based on AsyncAdmin
---
 .../hadoop/hbase/backup/util/RestoreTool.java      |   2 +-
 .../apache/hadoop/hbase/backup/TestBackupBase.java |   6 +-
 .../hbase/backup/TestBackupDeleteRestore.java      |   4 +-
 .../hadoop/hbase/backup/TestBackupMerge.java       |   4 +-
 .../hbase/backup/TestBackupMultipleDeletes.java    |   5 +-
 .../hadoop/hbase/backup/TestBackupSystemTable.java |   2 +-
 .../hadoop/hbase/backup/TestFullBackupSet.java     |   4 +-
 .../hbase/backup/TestFullBackupSetRestoreSet.java  |   6 +-
 .../hadoop/hbase/backup/TestFullRestore.java       |  16 +-
 .../hadoop/hbase/backup/TestIncrementalBackup.java |   7 +-
 .../backup/TestIncrementalBackupDeleteTable.java   |   7 +-
 .../TestIncrementalBackupMergeWithFailures.java    |   4 +-
 .../backup/TestIncrementalBackupWithBulkLoad.java  |   6 +-
 .../backup/TestIncrementalBackupWithFailures.java  |   5 +-
 .../hadoop/hbase/backup/TestRemoteBackup.java      |   4 +-
 .../hadoop/hbase/backup/TestRemoteRestore.java     |   4 +-
 .../hbase/backup/TestRestoreBoundaryTests.java     |   6 +-
 .../hbase/backup/TestSystemTableSnapshot.java      |   4 +-
 .../hadoop/hbase/AsyncMetaTableAccessor.java       |  29 +-
 .../java/org/apache/hadoop/hbase/client/Admin.java |  50 +-
 .../hadoop/hbase/client/AdminOverAsyncAdmin.java   | 945 +++++++++++++++++++++
 .../client/ConnectionOverAsyncConnection.java      |   2 +-
 .../hadoop/hbase/client/ConnectionUtils.java       |  18 +
 .../client/CoprocessorBlockingRpcCallback.java     |  68 ++
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |   9 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  37 +-
 .../client/RegionCoprocessorRpcChannelImpl.java    |  21 +-
 .../hbase/client/SyncCoprocessorRpcChannel.java    |   3 +
 .../hadoop/hbase/client/TableOverAsyncTable.java   |  51 +-
 .../hadoop/hbase/client/TestInterfaceAlign.java    |   2 +
 .../apache/hadoop/hbase/PerformanceEvaluation.java |   6 +-
 .../hadoop/hbase/rest/client/TestRemoteTable.java  |   4 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |  42 +-
 .../org/apache/hadoop/hbase/client/TestAdmin1.java |  40 +-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |  35 +-
 .../hadoop/hbase/client/TestFromClientSide.java    |   7 +-
 .../hadoop/hbase/client/TestFromClientSide3.java   |  10 +-
 .../client/TestSnapshotDFSTemporaryDirectory.java  |   5 +-
 .../client/TestSnapshotTemporaryDirectory.java     |  12 +-
 .../hbase/client/TestSplitOrMergeStatus.java       |  13 +-
 .../hbase/coprocessor/TestMasterObserver.java      |   2 +-
 .../org/apache/hadoop/hbase/master/TestMaster.java |   4 +-
 .../hbase/master/TestMasterMetricsWrapper.java     |   4 +-
 .../master/TestMergeTableRegionsWhileRSCrash.java  |   2 +-
 .../hbase/master/TestSplitRegionWhileRSCrash.java  |   2 +-
 .../master/assignment/TestAssignmentOnRSCrash.java |   2 +-
 .../TestMasterAbortWhileMergingTable.java          |   2 +-
 .../assignment/TestModifyTableWhileMerging.java    |   2 +-
 .../TestCleanupCompactedFileOnRegionClose.java     |   6 +-
 .../regionserver/TestEndToEndSplitTransaction.java |   9 +-
 .../TestNewVersionBehaviorFromClientSide.java      |   2 +-
 .../hbase/regionserver/TestRegionServerAbort.java  |   2 +-
 .../replication/regionserver/TestReplicator.java   |   4 +-
 .../hbase/snapshot/SnapshotTestingUtils.java       |  29 -
 .../snapshot/TestFlushSnapshotFromClient.java      |  41 +-
 .../hadoop/hbase/tool/TestBulkLoadHFiles.java      |   6 +-
 .../hbase/thrift2/ThriftHBaseServiceHandler.java   |   6 +-
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   |   9 +-
 58 files changed, 1283 insertions(+), 356 deletions(-)

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index 92254fa..e03bfe4 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -486,7 +486,7 @@ public class RestoreTool {
         LOG.info("Creating target table '" + targetTableName + "'");
         byte[][] keys;
         if (regionDirList == null || regionDirList.size() == 0) {
-          admin.createTable(htd, null);
+          admin.createTable(htd);
         } else {
           keys = generateBoundaryKeys(regionDirList);
           // create table using table descriptor and region boundaries
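
For readers following the RestoreTool hunk above: on the Admin interface, a table with no known boundaries is created with the single-argument overload, while pre-split restores still pass the boundary keys. A hedged sketch with made-up table and family names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class CreateTableSketch {
  static void create(Admin admin, byte[][] boundaryKeys) throws Exception {
    TableDescriptor htd = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("restored_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
        .build();
    if (boundaryKeys == null || boundaryKeys.length == 0) {
      admin.createTable(htd);                 // no region boundaries known
    } else {
      admin.createTable(htd, boundaryKeys);   // pre-split using the recovered keys
    }
  }
}
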
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index e0fca20..64978bc 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -26,7 +26,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
@@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
@@ -342,7 +340,7 @@ public class TestBackupBase {
   @AfterClass
   public static void tearDown() throws Exception {
     try{
-      SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+      SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
     } catch (Exception e) {
     }
     SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
@@ -416,7 +414,7 @@ public class TestBackupBase {
   protected static void createTables() throws Exception {
     long tid = System.currentTimeMillis();
     table1 = TableName.valueOf("test-" + tid);
-    HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
+    Admin ha = TEST_UTIL.getAdmin();
 
     // Create namespaces
     NamespaceDescriptor desc1 = NamespaceDescriptor.create("ns1").build();
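
The same mechanical change repeats through the backup tests that follow: code that used to cast to HBaseAdmin now works against the Admin interface, which the async-backed connection can hand out as well. A small sketch of the resulting shape (connection setup is assumed):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

final class AdminUsageSketch {
  // Use the Admin interface instead of the concrete HBaseAdmin class, so the
  // caller does not care whether the connection is classic or async-backed.
  static boolean tableExists(Connection conn, TableName table) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      return admin.tableExists(table);
    }
  }
}
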
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index 74176e3..f649b92 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -24,8 +24,8 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -61,7 +61,7 @@ public class TestBackupDeleteRestore extends TestBackupBase {
     assertTrue(checkSucceeded(backupId));
     LOG.info("backup complete");
     int numRows = TEST_UTIL.countRows(table1);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     // delete row
     try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
       Delete delete = new Delete(Bytes.toBytes("row0"));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
index beacef3..1a8638c 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
@@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.Assert;
@@ -62,7 +62,7 @@ public class TestBackupMerge extends TestBackupBase {
 
     Connection conn = ConnectionFactory.createConnection(conf1);
 
-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
index bffa480..538488b 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -26,9 +26,9 @@ import java.util.Set;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -59,9 +59,8 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     // #1 - create full backup for all tables
     LOG.info("create full backup image for all tables");
     List<TableName> tables = Lists.newArrayList(table1, table2);
-    HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
-    admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdmin client = new BackupAdminImpl(conn);
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
     String backupIdFull = client.backupTables(request);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
index aa6e5dd..5d48fc5 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -119,7 +119,7 @@ public class TestBackupSystemTable {
   }
 
   private void cleanBackupTable() throws IOException {
-    Admin admin = UTIL.getHBaseAdmin();
+    Admin admin = UTIL.getAdmin();
     admin.disableTable(BackupSystemTable.getTableName(conf));
     admin.truncateTable(BackupSystemTable.getTableName(conf), true);
     if (admin.isTableDisabled(BackupSystemTable.getTableName(conf))) {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
index 89ff571..7a3aec4 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
@@ -25,7 +25,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.ClassRule;
@@ -80,7 +80,7 @@ public class TestFullBackupSet extends TestBackupBase {
       // Run backup
       ret = ToolRunner.run(conf1, new RestoreDriver(), args);
       assertTrue(ret == 0);
-      HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+      Admin hba = TEST_UTIL.getAdmin();
       assertTrue(hba.tableExists(table1_restore));
       // Verify number of rows in both tables
       assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
index ca70f6a..3543133 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
@@ -25,7 +25,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.ClassRule;
@@ -76,7 +76,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase {
       // Run backup
       ret = ToolRunner.run(conf1, new RestoreDriver(), args);
       assertTrue(ret == 0);
-      HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+      Admin hba = TEST_UTIL.getAdmin();
       assertTrue(hba.tableExists(table1_restore));
       // Verify number of rows in both tables
       assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
@@ -118,7 +118,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase {
       // Run backup
       ret = ToolRunner.run(conf1, new RestoreDriver(), args);
       assertTrue(ret == 0);
-      HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+      Admin hba = TEST_UTIL.getAdmin();
       assertTrue(hba.tableExists(table1));
       // Verify number of rows in both tables
       assertEquals(count, TEST_UTIL.countRows(table1));
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index 2201e2f..f5ad0d7 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -26,7 +26,7 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.ClassRule;
@@ -66,7 +66,7 @@ public class TestFullRestore extends TestBackupBase {
     BackupAdmin client = getBackupAdmin();
     client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
       tableset, tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
     hba.close();
@@ -88,7 +88,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
 
     assertTrue(ret == 0);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
     hba.close();
@@ -110,7 +110,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
     assertTrue(ret == 0);
     //Verify that table has not been restored
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertFalse(hba.tableExists(table1_restore));
   }
 
@@ -131,7 +131,7 @@ public class TestFullRestore extends TestBackupBase {
     BackupAdmin client = getBackupAdmin();
     client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
       restore_tableset, tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));
     TEST_UTIL.deleteTable(table2_restore);
@@ -162,7 +162,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
 
     assertTrue(ret == 0);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));
     TEST_UTIL.deleteTable(table2_restore);
@@ -210,7 +210,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
     assertTrue(ret == 0);
 
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1));
     hba.close();
   }
@@ -256,7 +256,7 @@ public class TestFullRestore extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
 
     assertTrue(ret == 0);
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table2));
     assertTrue(hba.tableExists(table3));
     hba.close();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 35a77ea..d7c2cd0 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -29,9 +29,9 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -93,8 +93,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       int NB_ROWS_FAM3 = 6;
       insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
       insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
-      HBaseAdmin admin = null;
-      admin = (HBaseAdmin) conn.getAdmin();
+      Admin admin = conn.getAdmin();
       BackupAdminImpl client = new BackupAdminImpl(conn);
       BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
       String backupIdFull = client.backupTables(request);
@@ -182,7 +181,7 @@ public class TestIncrementalBackup extends TestBackupBase {
                 tablesRestoreFull, tablesMapFull, true));
 
       // #6.1 - check tables for full restore
-      HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+      Admin hAdmin = TEST_UTIL.getAdmin();
       assertTrue(hAdmin.tableExists(table1_restore));
       assertTrue(hAdmin.tableExists(table2_restore));
       hAdmin.close();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index 08834f2..837de4d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -24,9 +24,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -64,9 +64,8 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     LOG.info("create full backup image for all tables");
 
     List<TableName> tables = Lists.newArrayList(table1, table2);
-    HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
-    admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
@@ -105,7 +104,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
       tablesRestoreFull, tablesMapFull, false));
 
     // #5.1 - check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hAdmin = TEST_UTIL.getAdmin();
     assertTrue(hAdmin.tableExists(table1_restore));
     assertTrue(hAdmin.tableExists(table2_restore));
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 7351258..1bde63b 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
 import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Pair;
@@ -235,7 +235,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
 
     Connection conn = ConnectionFactory.createConnection(conf1);
 
-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index 4b02077..60aa635 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -26,9 +26,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -70,7 +70,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
 
     List<TableName> tables = Lists.newArrayList(table1);
     Connection conn = ConnectionFactory.createConnection(conf1);
-    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
@@ -119,7 +119,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     // Delete all data in table1
     TEST_UTIL.deleteTableData(table1);
     // #5.1 - check tables for full restore */
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hAdmin = TEST_UTIL.getAdmin();
 
     // #6 - restore incremental backup for table1
     TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
index f6725d9..546cf41 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
 import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -90,8 +90,7 @@ public class TestIncrementalBackupWithFailures extends TestBackupBase {
     int NB_ROWS_FAM3 = 6;
     insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
 
-    HBaseAdmin admin = null;
-    admin = (HBaseAdmin) conn.getAdmin();
+    Admin admin = conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
index 05826e2..2d99e0d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -26,9 +26,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
@@ -126,7 +126,7 @@ public class TestRemoteBackup extends TestBackupBase {
       tablesRestoreFull, tablesMapFull, false));
 
     // check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    Admin hAdmin = TEST_UTIL.getAdmin();
     assertTrue(hAdmin.tableExists(table1_restore));
 
     // #5.2 - checking row count of tables for full restore
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
index 25ebca2..d670144 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -61,7 +61,7 @@ public class TestRemoteRestore extends TestBackupBase {
     getBackupAdmin().restore(
       BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset,
         tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
     hba.close();
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
index 07f57cc..a6808cd 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -23,7 +23,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -55,7 +55,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     getBackupAdmin().restore(
       BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap,
         false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
   }
@@ -76,7 +76,7 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     getBackupAdmin().restore(
       BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset,
         tablemap, false));
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));
     TEST_UTIL.deleteTable(table2_restore);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
index b93fa77..bd29512 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.backup;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
@@ -47,7 +47,7 @@ public class TestSystemTableSnapshot extends TestBackupBase {
 
     TableName backupSystem = BackupSystemTable.getTableName(conf1);
 
-    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    Admin hba = TEST_UTIL.getAdmin();
     String snapshotName = "sysTable";
     hba.snapshot(snapshotName, backupSystem);
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
index 4a886d1..d04ea52 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -80,23 +79,17 @@ public class AsyncMetaTableAccessor {
       TableName tableName) {
     CompletableFuture<Optional<TableState>> future = new CompletableFuture<>();
     Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn());
-    long time = EnvironmentEdgeManager.currentTime();
-    try {
-      get.setTimeRange(0, time);
-      addListener(metaTable.get(get), (result, error) -> {
-        if (error != null) {
-          future.completeExceptionally(error);
-          return;
-        }
-        try {
-          future.complete(getTableState(result));
-        } catch (IOException e) {
-          future.completeExceptionally(e);
-        }
-      });
-    } catch (IOException ioe) {
-      future.completeExceptionally(ioe);
-    }
+    addListener(metaTable.get(get), (result, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      try {
+        future.complete(getTableState(result));
+      } catch (IOException e) {
+        future.completeExceptionally(e);
+      }
+    });
     return future;
   }
 
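For reference, a minimal self-contained sketch of the listener-to-future bridging pattern that getTableState now relies on after dropping the time-range setup; fetchAsync and Result below are hypothetical stand-ins, not HBase APIs, and this snippet is not part of the patch.

    import java.util.concurrent.CompletableFuture;
    import java.util.function.BiConsumer;

    // Sketch only: bridge a (result, error) callback into a CompletableFuture,
    // mirroring the addListener(...) shape in the hunk above.
    final class ListenerToFutureSketch {
      interface Result { String state(); }

      // Hypothetical async source that reports completion via a callback.
      static void fetchAsync(String key, BiConsumer<Result, Throwable> listener) {
        listener.accept(() -> "ENABLED", null);
      }

      static CompletableFuture<String> getState(String key) {
        CompletableFuture<String> future = new CompletableFuture<>();
        fetchAsync(key, (result, error) -> {
          if (error != null) {
            future.completeExceptionally(error); // propagate the failure as-is
            return;
          }
          try {
            future.complete(result.state());     // interpreting the result may itself fail
          } catch (RuntimeException e) {
            future.completeExceptionally(e);
          }
        });
        return future;
      }
    }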
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 14abb6e..707f5e7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -255,13 +254,14 @@ public interface Admin extends Abortable, Closeable {
   Future<Void> createTableAsync(TableDescriptor desc) throws IOException;
 
   /**
-   * Creates a new table but does not block and wait for it to come online. You can use
-   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
-   * ExecutionException if there was an error while executing the operation or TimeoutException in
-   * case the wait timeout was not long enough to allow the operation to complete.
-   * <p/>
-   * Throws IllegalArgumentException Bad table name, if the split keys are repeated and if the split
-   * key has empty byte array.
+   * Creates a new table but does not block and wait for it to come online.
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+   * It may throw ExecutionException if there was an error while executing the operation
+   * or TimeoutException in case the wait timeout was not long enough to allow the
+   * operation to complete.
+   * Throws IllegalArgumentException if the table name is bad, if the split keys
+   *    are repeated, or if a split key is an empty byte array.
+   *
    * @param desc table descriptor for table
    * @param splitKeys keys to check if the table has been created with all split keys
    * @throws IOException if a remote or network exception occurs
@@ -699,29 +699,7 @@ public interface Admin extends Abortable, Closeable {
   void move(byte[] encodedRegionName) throws IOException;
 
   /**
-   * Move the region <code>rencodedRegionName</code> to <code>destServerName</code>.
-   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
-   *          suffix: e.g. if regionname is
-   *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
-   *          then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
-   * @param destServerName The servername of the destination regionserver. If passed the empty byte
-   *          array we'll assign to a random server. A server name is made of host, port and
-   *          startcode. Here is an example: <code> host187.example.com,60020,1289493121758</code>
-   * @throws IOException if we can't find a region named <code>encodedRegionName</code>
-   * @deprecated Use {@link #move(byte[], ServerName)} instead. And if you want to move the region
-   *             to a random server, please use {@link #move(byte[])}.
-   */
-  @Deprecated
-  default void move(byte[] encodedRegionName, byte[] destServerName) throws IOException {
-    if (destServerName == null || destServerName.length == 0) {
-      move(encodedRegionName);
-    } else {
-      move(encodedRegionName, ServerName.valueOf(Bytes.toString(destServerName)));
-    }
-  }
-
-  /**
-   * Move the region <code>rencodedRegionName</code> to <code>destServerName</code>.
+   * Move the region <code>encodedRegionName</code> to <code>destServerName</code>.
    * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
    *          suffix: e.g. if regionname is
    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
@@ -1063,9 +1041,7 @@ public interface Admin extends Abortable, Closeable {
    * @return a {@link RegionMetrics} list of all regions hosted on a region server
    * @throws IOException if a remote or network exception occurs
    */
-  default List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
-    return getRegionMetrics(serverName, null);
-  }
+  List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException;
 
   /**
    * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table.
@@ -1654,7 +1630,10 @@ public interface Admin extends Abortable, Closeable {
    * </pre></blockquote></div>
    *
    * @return A MasterCoprocessorRpcChannel instance
+   * @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level; please stop using
+   *             it. Use the coprocessorService methods in {@link AsyncAdmin} instead.
    */
+  @Deprecated
   CoprocessorRpcChannel coprocessorService();
 
 
@@ -1679,7 +1658,10 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param serverName the server name to which the endpoint call is made
    * @return A RegionServerCoprocessorRpcChannel instance
+   * @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level; please stop using
+   *             it. Use the coprocessorService methods in {@link AsyncAdmin} instead.
    */
+  @Deprecated
   CoprocessorRpcChannel coprocessorService(ServerName serverName);
 
 
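A hedged usage sketch of waiting on createTableAsync the way the Javadoc above describes; it is not part of the patch, and the table name, column family, and timeout are illustrative.

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class CreateTableAsyncSketch {
      static void createAndWait(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
        Future<Void> future = admin.createTableAsync(desc);
        try {
          // ExecutionException wraps failures while executing the operation on the master.
          future.get(5, TimeUnit.MINUTES);
        } catch (TimeoutException e) {
          // The create may still complete later; only this wait timed out.
        }
      }
    }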
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
new file mode 100644
index 0000000..599e5d6
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
@@ -0,0 +1,945 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError;
+import static org.apache.hadoop.hbase.util.FutureUtils.get;
+
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcChannel;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CacheEvictionStats;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.replication.TableCFs;
+import org.apache.hadoop.hbase.client.security.SecurityCapability;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.quotas.QuotaFilter;
+import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView;
+import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
+import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.UserPermission;
+import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The {@link Admin} implementation which is based on an {@link AsyncAdmin}.
+ */
+@InterfaceAudience.Private
+class AdminOverAsyncAdmin implements Admin {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AdminOverAsyncAdmin.class);
+
+  private volatile boolean aborted = false;
+
+  private final Connection conn;
+
+  private final RawAsyncHBaseAdmin admin;
+
+  private final int operationTimeout;
+
+  private final int syncWaitTimeout;
+
+  public AdminOverAsyncAdmin(Connection conn, RawAsyncHBaseAdmin admin) {
+    this.conn = conn;
+    this.admin = admin;
+    this.operationTimeout = conn.getConfiguration().getInt(
+      HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+    this.syncWaitTimeout =
+      conn.getConfiguration().getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
+  }
+
+  @Override
+  public int getOperationTimeout() {
+    return operationTimeout;
+  }
+
+  @Override
+  public int getSyncWaitTimeout() {
+    return syncWaitTimeout;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+    LOG.warn("Aborting becasue of {}", why, e);
+    this.aborted = true;
+  }
+
+  @Override
+  public boolean isAborted() {
+    return aborted;
+  }
+
+  @Override
+  public Connection getConnection() {
+    return conn;
+  }
+
+  @Override
+  public boolean tableExists(TableName tableName) throws IOException {
+    return get(admin.tableExists(tableName));
+  }
+
+  @Override
+  public List<TableDescriptor> listTableDescriptors() throws IOException {
+    return get(admin.listTableDescriptors());
+  }
+
+  @Override
+  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
+      throws IOException {
+    return get(admin.listTableDescriptors(pattern, includeSysTables));
+  }
+
+  @Override
+  public TableName[] listTableNames() throws IOException {
+    return get(admin.listTableNames()).toArray(new TableName[0]);
+  }
+
+  @Override
+  public TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException {
+    return get(admin.listTableNames(pattern, includeSysTables)).toArray(new TableName[0]);
+  }
+
+  @Override
+  public TableDescriptor getDescriptor(TableName tableName)
+      throws TableNotFoundException, IOException {
+    return get(admin.getDescriptor(tableName));
+  }
+
+  @Override
+  public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
+      throws IOException {
+    get(admin.createTable(desc, startKey, endKey, numRegions));
+  }
+
+  @Override
+  public Future<Void> createTableAsync(TableDescriptor desc) throws IOException {
+    return admin.createTable(desc);
+  }
+
+  @Override
+  public Future<Void> createTableAsync(TableDescriptor desc, byte[][] splitKeys)
+      throws IOException {
+    return admin.createTable(desc, splitKeys);
+  }
+
+  @Override
+  public Future<Void> deleteTableAsync(TableName tableName) throws IOException {
+    return admin.deleteTable(tableName);
+  }
+
+  @Override
+  public Future<Void> truncateTableAsync(TableName tableName, boolean preserveSplits)
+      throws IOException {
+    return admin.truncateTable(tableName, preserveSplits);
+  }
+
+  @Override
+  public Future<Void> enableTableAsync(TableName tableName) throws IOException {
+    return admin.enableTable(tableName);
+  }
+
+  @Override
+  public Future<Void> disableTableAsync(TableName tableName) throws IOException {
+    return admin.disableTable(tableName);
+  }
+
+  @Override
+  public boolean isTableEnabled(TableName tableName) throws IOException {
+    return get(admin.isTableEnabled(tableName));
+  }
+
+  @Override
+  public boolean isTableDisabled(TableName tableName) throws IOException {
+    return get(admin.isTableDisabled(tableName));
+  }
+
+  @Override
+  public boolean isTableAvailable(TableName tableName) throws IOException {
+    return get(admin.isTableAvailable(tableName));
+  }
+
+  @Override
+  public Future<Void> addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
+      throws IOException {
+    return admin.addColumnFamily(tableName, columnFamily);
+  }
+
+  @Override
+  public Future<Void> deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily)
+      throws IOException {
+    return admin.deleteColumnFamily(tableName, columnFamily);
+  }
+
+  @Override
+  public Future<Void> modifyColumnFamilyAsync(TableName tableName,
+      ColumnFamilyDescriptor columnFamily) throws IOException {
+    return admin.modifyColumnFamily(tableName, columnFamily);
+  }
+
+  @Override
+  public List<RegionInfo> getRegions(ServerName serverName) throws IOException {
+    return get(admin.getRegions(serverName));
+  }
+
+  @Override
+  public void flush(TableName tableName) throws IOException {
+    get(admin.flush(tableName));
+  }
+
+  @Override
+  public void flushRegion(byte[] regionName) throws IOException {
+    get(admin.flushRegion(regionName));
+  }
+
+  @Override
+  public void flushRegionServer(ServerName serverName) throws IOException {
+    get(admin.flushRegionServer(serverName));
+  }
+
+  @Override
+  public void compact(TableName tableName) throws IOException {
+    get(admin.compact(tableName));
+  }
+
+  @Override
+  public void compactRegion(byte[] regionName) throws IOException {
+    get(admin.compactRegion(regionName));
+  }
+
+  @Override
+  public void compact(TableName tableName, byte[] columnFamily) throws IOException {
+    get(admin.compact(tableName, columnFamily));
+  }
+
+  @Override
+  public void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException {
+    get(admin.compactRegion(regionName, columnFamily));
+  }
+
+  @Override
+  public void compact(TableName tableName, CompactType compactType)
+      throws IOException, InterruptedException {
+    get(admin.compact(tableName, compactType));
+  }
+
+  @Override
+  public void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
+      throws IOException, InterruptedException {
+    get(admin.compact(tableName, columnFamily, compactType));
+  }
+
+  @Override
+  public void majorCompact(TableName tableName) throws IOException {
+    get(admin.majorCompact(tableName));
+  }
+
+  @Override
+  public void majorCompactRegion(byte[] regionName) throws IOException {
+    get(admin.majorCompactRegion(regionName));
+  }
+
+  @Override
+  public void majorCompact(TableName tableName, byte[] columnFamily) throws IOException {
+    get(admin.majorCompact(tableName, columnFamily));
+  }
+
+  @Override
+  public void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException {
+    get(admin.majorCompactRegion(regionName, columnFamily));
+  }
+
+  @Override
+  public void majorCompact(TableName tableName, CompactType compactType)
+      throws IOException, InterruptedException {
+    get(admin.majorCompact(tableName, compactType));
+  }
+
+  @Override
+  public void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
+      throws IOException, InterruptedException {
+    get(admin.majorCompact(tableName, columnFamily, compactType));
+  }
+
+  @Override
+  public Map<ServerName, Boolean> compactionSwitch(boolean switchState,
+      List<String> serverNamesList) throws IOException {
+    return get(admin.compactionSwitch(switchState, serverNamesList));
+  }
+
+  @Override
+  public void compactRegionServer(ServerName serverName) throws IOException {
+    get(admin.compactRegionServer(serverName));
+  }
+
+  @Override
+  public void majorCompactRegionServer(ServerName serverName) throws IOException {
+    get(admin.majorCompactRegionServer(serverName));
+  }
+
+  @Override
+  public void move(byte[] encodedRegionName) throws IOException {
+    get(admin.move(encodedRegionName));
+  }
+
+  @Override
+  public void move(byte[] encodedRegionName, ServerName destServerName) throws IOException {
+    get(admin.move(encodedRegionName, destServerName));
+  }
+
+  @Override
+  public void assign(byte[] regionName) throws IOException {
+    get(admin.assign(regionName));
+  }
+
+  @Override
+  public void unassign(byte[] regionName, boolean force) throws IOException {
+    get(admin.unassign(regionName, force));
+  }
+
+  @Override
+  public void offline(byte[] regionName) throws IOException {
+    get(admin.offline(regionName));
+  }
+
+  @Override
+  public boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException {
+    return get(admin.balancerSwitch(onOrOff, synchronous));
+  }
+
+  @Override
+  public boolean balance() throws IOException {
+    return get(admin.balance());
+  }
+
+  @Override
+  public boolean balance(boolean force) throws IOException {
+    return get(admin.balance(force));
+  }
+
+  @Override
+  public boolean isBalancerEnabled() throws IOException {
+    return get(admin.isBalancerEnabled());
+  }
+
+  @Override
+  public CacheEvictionStats clearBlockCache(TableName tableName) throws IOException {
+    return get(admin.clearBlockCache(tableName));
+  }
+
+  @Override
+  public boolean normalize() throws IOException {
+    return get(admin.normalize());
+  }
+
+  @Override
+  public boolean isNormalizerEnabled() throws IOException {
+    return get(admin.isNormalizerEnabled());
+  }
+
+  @Override
+  public boolean normalizerSwitch(boolean on) throws IOException {
+    return get(admin.normalizerSwitch(on));
+  }
+
+  @Override
+  public boolean catalogJanitorSwitch(boolean onOrOff) throws IOException {
+    return get(admin.catalogJanitorSwitch(onOrOff));
+  }
+
+  @Override
+  public int runCatalogJanitor() throws IOException {
+    return get(admin.runCatalogJanitor());
+  }
+
+  @Override
+  public boolean isCatalogJanitorEnabled() throws IOException {
+    return get(admin.isCatalogJanitorEnabled());
+  }
+
+  @Override
+  public boolean cleanerChoreSwitch(boolean onOrOff) throws IOException {
+    return get(admin.cleanerChoreSwitch(onOrOff));
+  }
+
+  @Override
+  public boolean runCleanerChore() throws IOException {
+    return get(admin.runCleanerChore());
+  }
+
+  @Override
+  public boolean isCleanerChoreEnabled() throws IOException {
+    return get(admin.isCleanerChoreEnabled());
+  }
+
+  @Override
+  public Future<Void> mergeRegionsAsync(byte[][] nameOfRegionsToMerge, boolean forcible)
+      throws IOException {
+    return admin.mergeRegions(Arrays.asList(nameOfRegionsToMerge), forcible);
+  }
+
+  @Override
+  public void split(TableName tableName) throws IOException {
+    get(admin.split(tableName));
+  }
+
+  @Override
+  public void split(TableName tableName, byte[] splitPoint) throws IOException {
+    get(admin.split(tableName, splitPoint));
+  }
+
+  @Override
+  public Future<Void> splitRegionAsync(byte[] regionName) throws IOException {
+    return admin.splitRegion(regionName);
+  }
+
+  @Override
+  public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException {
+    return admin.splitRegion(regionName, splitPoint);
+  }
+
+  @Override
+  public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
+    return admin.modifyTable(td);
+  }
+
+  @Override
+  public void shutdown() throws IOException {
+    get(admin.shutdown());
+  }
+
+  @Override
+  public void stopMaster() throws IOException {
+    get(admin.stopMaster());
+  }
+
+  @Override
+  public boolean isMasterInMaintenanceMode() throws IOException {
+    return get(admin.isMasterInMaintenanceMode());
+  }
+
+  @Override
+  public void stopRegionServer(String hostnamePort) throws IOException {
+    get(admin.stopRegionServer(ServerName.valueOf(hostnamePort, 0)));
+  }
+
+  @Override
+  public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException {
+    return get(admin.getClusterMetrics(options));
+  }
+
+  @Override
+  public List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
+    return get(admin.getRegionMetrics(serverName));
+  }
+
+  @Override
+  public List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName)
+      throws IOException {
+    return get(admin.getRegionMetrics(serverName, tableName));
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return conn.getConfiguration();
+  }
+
+  @Override
+  public Future<Void> createNamespaceAsync(NamespaceDescriptor descriptor) throws IOException {
+    return admin.createNamespace(descriptor);
+  }
+
+  @Override
+  public Future<Void> modifyNamespaceAsync(NamespaceDescriptor descriptor) throws IOException {
+    return admin.modifyNamespace(descriptor);
+  }
+
+  @Override
+  public Future<Void> deleteNamespaceAsync(String name) throws IOException {
+    return admin.deleteNamespace(name);
+  }
+
+  @Override
+  public NamespaceDescriptor getNamespaceDescriptor(String name)
+      throws NamespaceNotFoundException, IOException {
+    return get(admin.getNamespaceDescriptor(name));
+  }
+
+  @Override
+  public String[] listNamespaces() throws IOException {
+    return get(admin.listNamespaces()).toArray(new String[0]);
+  }
+
+  @Override
+  public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
+    return get(admin.listNamespaceDescriptors()).toArray(new NamespaceDescriptor[0]);
+  }
+
+  @Override
+  public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
+    return get(admin.listTableDescriptorsByNamespace(Bytes.toString(name)));
+  }
+
+  @Override
+  public TableName[] listTableNamesByNamespace(String name) throws IOException {
+    return get(admin.listTableNamesByNamespace(name)).toArray(new TableName[0]);
+  }
+
+  @Override
+  public List<RegionInfo> getRegions(TableName tableName) throws IOException {
+    return get(admin.getRegions(tableName));
+  }
+
+  @Override
+  public void close() {
+    // do nothing, AsyncAdmin is not a Closeable.
+  }
+
+  @Override
+  public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
+    return get(admin.listTableDescriptors(tableNames));
+  }
+
+  @Override
+  public Future<Boolean> abortProcedureAsync(long procId, boolean mayInterruptIfRunning)
+      throws IOException {
+    return admin.abortProcedure(procId, mayInterruptIfRunning);
+  }
+
+  @Override
+  public String getProcedures() throws IOException {
+    return get(admin.getProcedures());
+  }
+
+  @Override
+  public String getLocks() throws IOException {
+    return get(admin.getLocks());
+  }
+
+  @Override
+  public void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException {
+    get(admin.rollWALWriter(serverName));
+  }
+
+  @Override
+  public CompactionState getCompactionState(TableName tableName) throws IOException {
+    return get(admin.getCompactionState(tableName));
+  }
+
+  @Override
+  public CompactionState getCompactionState(TableName tableName, CompactType compactType)
+      throws IOException {
+    return get(admin.getCompactionState(tableName, compactType));
+  }
+
+  @Override
+  public CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException {
+    return get(admin.getCompactionStateForRegion(regionName));
+  }
+
+  @Override
+  public long getLastMajorCompactionTimestamp(TableName tableName) throws IOException {
+    return get(admin.getLastMajorCompactionTimestamp(tableName)).orElse(0L);
+  }
+
+  @Override
+  public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
+    return get(admin.getLastMajorCompactionTimestampForRegion(regionName)).orElse(0L);
+  }
+
+  @Override
+  public void snapshot(SnapshotDescription snapshot)
+      throws IOException, SnapshotCreationException, IllegalArgumentException {
+    get(admin.snapshot(snapshot));
+  }
+
+  @Override
+  public Future<Void> snapshotAsync(SnapshotDescription snapshot)
+      throws IOException, SnapshotCreationException {
+    return admin.snapshot(snapshot);
+  }
+
+  @Override
+  public boolean isSnapshotFinished(SnapshotDescription snapshot)
+      throws IOException, HBaseSnapshotException, UnknownSnapshotException {
+    return get(admin.isSnapshotFinished(snapshot));
+  }
+
+  @Override
+  public void restoreSnapshot(String snapshotName) throws IOException, RestoreSnapshotException {
+    get(admin.restoreSnapshot(snapshotName));
+  }
+
+  @Override
+  public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
+      throws IOException, RestoreSnapshotException {
+    get(admin.restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl));
+  }
+
+  @Override
+  public Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName,
+      boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException {
+    return admin.cloneSnapshot(snapshotName, tableName, restoreAcl);
+  }
+
+  @Override
+  public void execProcedure(String signature, String instance, Map<String, String> props)
+      throws IOException {
+    get(admin.execProcedure(signature, instance, props));
+  }
+
+  @Override
+  public byte[] execProcedureWithReturn(String signature, String instance,
+      Map<String, String> props) throws IOException {
+    return get(admin.execProcedureWithReturn(signature, instance, props));
+  }
+
+  @Override
+  public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
+      throws IOException {
+    return get(admin.isProcedureFinished(signature, instance, props));
+  }
+
+  @Override
+  public List<SnapshotDescription> listSnapshots() throws IOException {
+    return get(admin.listSnapshots());
+  }
+
+  @Override
+  public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
+    return get(admin.listSnapshots(pattern));
+  }
+
+  @Override
+  public List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
+      Pattern snapshotNamePattern) throws IOException {
+    return get(admin.listTableSnapshots(tableNamePattern, snapshotNamePattern));
+  }
+
+  @Override
+  public void deleteSnapshot(String snapshotName) throws IOException {
+    get(admin.deleteSnapshot(snapshotName));
+  }
+
+  @Override
+  public void deleteSnapshots(Pattern pattern) throws IOException {
+    get(admin.deleteSnapshots(pattern));
+  }
+
+  @Override
+  public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
+      throws IOException {
+    get(admin.deleteTableSnapshots(tableNamePattern, snapshotNamePattern));
+  }
+
+  @Override
+  public void setQuota(QuotaSettings quota) throws IOException {
+    get(admin.setQuota(quota));
+  }
+
+  @Override
+  public List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException {
+    return get(admin.getQuota(filter));
+  }
+
+  @SuppressWarnings("deprecation")
+  private static final class SyncCoprocessorRpcChannelOverAsync implements CoprocessorRpcChannel {
+
+    private final RpcChannel delegate;
+
+    public SyncCoprocessorRpcChannelOverAsync(RpcChannel delegate) {
+      this.delegate = delegate;
+    }
+
+    @Override
+    public void callMethod(MethodDescriptor method, RpcController controller, Message request,
+        Message responsePrototype, RpcCallback<Message> done) {
+      ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
+      CoprocessorBlockingRpcCallback<Message> callback = new CoprocessorBlockingRpcCallback<>();
+      delegate.callMethod(method, c, request, responsePrototype, callback);
+      Message ret;
+      try {
+        ret = callback.get();
+      } catch (IOException e) {
+        setCoprocessorError(controller, e);
+        return;
+      }
+      if (c.failed()) {
+        setCoprocessorError(controller, c.getFailed());
+      }
+      done.run(ret);
+    }
+
+    @Override
+    public Message callBlockingMethod(MethodDescriptor method, RpcController controller,
+        Message request, Message responsePrototype) throws ServiceException {
+      ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
+      CoprocessorBlockingRpcCallback<Message> done = new CoprocessorBlockingRpcCallback<>();
+      callMethod(method, c, request, responsePrototype, done);
+      Message ret;
+      try {
+        ret = done.get();
+      } catch (IOException e) {
+        throw new ServiceException(e);
+      }
+      if (c.failed()) {
+        setCoprocessorError(controller, c.getFailed());
+        throw new ServiceException(c.getFailed());
+      }
+      return ret;
+    }
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public CoprocessorRpcChannel coprocessorService() {
+    return new SyncCoprocessorRpcChannelOverAsync(
+      new MasterCoprocessorRpcChannelImpl(admin.<Message> newMasterCaller()));
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public CoprocessorRpcChannel coprocessorService(ServerName serverName) {
+    return new SyncCoprocessorRpcChannelOverAsync(new RegionServerCoprocessorRpcChannelImpl(
+      admin.<Message> newServerCaller().serverName(serverName)));
+  }
+
+  @Override
+  public void updateConfiguration(ServerName server) throws IOException {
+    get(admin.updateConfiguration(server));
+  }
+
+  @Override
+  public void updateConfiguration() throws IOException {
+    get(admin.updateConfiguration());
+  }
+
+  @Override
+  public List<SecurityCapability> getSecurityCapabilities() throws IOException {
+    return get(admin.getSecurityCapabilities());
+  }
+
+  @Override
+  public boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException {
+    return get(admin.splitSwitch(enabled, synchronous));
+  }
+
+  @Override
+  public boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException {
+    return get(admin.mergeSwitch(enabled, synchronous));
+  }
+
+  @Override
+  public boolean isSplitEnabled() throws IOException {
+    return get(admin.isSplitEnabled());
+  }
+
+  @Override
+  public boolean isMergeEnabled() throws IOException {
+    return get(admin.isMergeEnabled());
+  }
+
+  @Override
+  public Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
+      boolean enabled) throws IOException {
+    return admin.addReplicationPeer(peerId, peerConfig, enabled);
+  }
+
+  @Override
+  public Future<Void> removeReplicationPeerAsync(String peerId) throws IOException {
+    return admin.removeReplicationPeer(peerId);
+  }
+
+  @Override
+  public Future<Void> enableReplicationPeerAsync(String peerId) throws IOException {
+    return admin.enableReplicationPeer(peerId);
+  }
+
+  @Override
+  public Future<Void> disableReplicationPeerAsync(String peerId) throws IOException {
+    return admin.disableReplicationPeer(peerId);
+  }
+
+  @Override
+  public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws IOException {
+    return get(admin.getReplicationPeerConfig(peerId));
+  }
+
+  @Override
+  public Future<Void> updateReplicationPeerConfigAsync(String peerId,
+      ReplicationPeerConfig peerConfig) throws IOException {
+    return admin.updateReplicationPeerConfig(peerId, peerConfig);
+  }
+
+  @Override
+  public List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
+    return get(admin.listReplicationPeers());
+  }
+
+  @Override
+  public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException {
+    return get(admin.listReplicationPeers(pattern));
+  }
+
+  @Override
+  public Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
+      SyncReplicationState state) throws IOException {
+    return admin.transitReplicationPeerSyncReplicationState(peerId, state);
+  }
+
+  @Override
+  public void decommissionRegionServers(List<ServerName> servers, boolean offload)
+      throws IOException {
+    get(admin.decommissionRegionServers(servers, offload));
+  }
+
+  @Override
+  public List<ServerName> listDecommissionedRegionServers() throws IOException {
+    return get(admin.listDecommissionedRegionServers());
+  }
+
+  @Override
+  public void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames)
+      throws IOException {
+    get(admin.recommissionRegionServer(server, encodedRegionNames));
+  }
+
+  @Override
+  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+    return get(admin.listReplicatedTableCFs());
+  }
+
+  @Override
+  public void enableTableReplication(TableName tableName) throws IOException {
+    get(admin.enableTableReplication(tableName));
+  }
+
+  @Override
+  public void disableTableReplication(TableName tableName) throws IOException {
+    get(admin.disableTableReplication(tableName));
+  }
+
+  @Override
+  public void clearCompactionQueues(ServerName serverName, Set<String> queues)
+      throws IOException, InterruptedException {
+    get(admin.clearCompactionQueues(serverName, queues));
+  }
+
+  @Override
+  public List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException {
+    return get(admin.clearDeadServers(servers));
+  }
+
+  @Override
+  public void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits)
+      throws IOException {
+    get(admin.cloneTableSchema(tableName, newTableName, preserveSplits));
+  }
+
+  @Override
+  public boolean switchRpcThrottle(boolean enable) throws IOException {
+    return get(admin.switchRpcThrottle(enable));
+  }
+
+  @Override
+  public boolean isRpcThrottleEnabled() throws IOException {
+    return get(admin.isRpcThrottleEnabled());
+  }
+
+  @Override
+  public boolean exceedThrottleQuotaSwitch(boolean enable) throws IOException {
+    return get(admin.exceedThrottleQuotaSwitch(enable));
+  }
+
+  @Override
+  public Map<TableName, Long> getSpaceQuotaTableSizes() throws IOException {
+    return get(admin.getSpaceQuotaTableSizes());
+  }
+
+  @Override
+  public Map<TableName, ? extends SpaceQuotaSnapshotView> getRegionServerSpaceQuotaSnapshots(
+      ServerName serverName) throws IOException {
+    return get(admin.getRegionServerSpaceQuotaSnapshots(serverName));
+  }
+
+  @Override
+  public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(String namespace) throws IOException {
+    return get(admin.getCurrentSpaceQuotaSnapshot(namespace));
+  }
+
+  @Override
+  public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(TableName tableName)
+      throws IOException {
+    return get(admin.getCurrentSpaceQuotaSnapshot(tableName));
+  }
+
+  @Override
+  public void grant(UserPermission userPermission, boolean mergeExistingPermissions)
+      throws IOException {
+    get(admin.grant(userPermission, mergeExistingPermissions));
+  }
+
+  @Override
+  public void revoke(UserPermission userPermission) throws IOException {
+    get(admin.revoke(userPermission));
+  }
+
+  @Override
+  public List<UserPermission> getUserPermissions(
+      GetUserPermissionsRequest getUserPermissionsRequest) throws IOException {
+    return get(admin.getUserPermissions(getUserPermissionsRequest));
+  }
+
+  @Override
+  public List<Boolean> hasUserPermissions(String userName, List<Permission> permissions)
+      throws IOException {
+    return get(admin.hasUserPermissions(userName, permissions));
+  }
+}
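The class above blocks on CompletableFutures through the statically imported get from FutureUtils. Below is a minimal sketch of that sync-over-async bridge idea, shown only as an illustration; it is not the actual org.apache.hadoop.hbase.util.FutureUtils code.

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    // Sketch only: block on a future and rethrow failures as IOException, the same
    // shape of helper that the get(...) calls above rely on.
    final class BlockingBridgeSketch {
      static <T> T blockOn(CompletableFuture<T> future) throws IOException {
        try {
          return future.get();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (IOException) new InterruptedIOException("interrupted while waiting").initCause(e);
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          throw cause instanceof IOException ? (IOException) cause : new IOException(cause);
        }
      }
    }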
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
index 61cc708..dfe7d8f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
@@ -102,7 +102,7 @@ class ConnectionOverAsyncConnection implements Connection {
 
   @Override
   public Admin getAdmin() throws IOException {
-    return oldConn.getAdmin();
+    return new AdminOverAsyncAdmin(this, (RawAsyncHBaseAdmin) conn.getAdmin());
   }
 
   @Override
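A hedged usage sketch of the effect of this change: a synchronous Admin obtained from a Connection that is implemented over an AsyncConnection. The configuration and calls are illustrative and not taken from the patch.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    class GetAdminSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // On this branch the returned Admin may be an AdminOverAsyncAdmin delegating to
          // RawAsyncHBaseAdmin; callers only ever see the Admin interface.
          admin.listTableNames();
        }
      }
    }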
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 2fa30b5..a6c47b5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
@@ -715,4 +716,21 @@ public final class ConnectionUtils {
       pool.shutdownNow();
     }
   }
+
+  static void setCoprocessorError(com.google.protobuf.RpcController controller, Throwable error) {
+    if (controller == null) {
+      return;
+    }
+    if (controller instanceof ServerRpcController) {
+      if (error instanceof IOException) {
+        ((ServerRpcController) controller).setFailedOn((IOException) error);
+      } else {
+        ((ServerRpcController) controller).setFailedOn(new IOException(error));
+      }
+    } else if (controller instanceof ClientCoprocessorRpcController) {
+      ((ClientCoprocessorRpcController) controller).setFailed(error);
+    } else {
+      controller.setFailed(error.toString());
+    }
+  }
 }
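An illustrative sketch of how a failure surfaces through the helper above when the caller passed a ServerRpcController; it assumes placement in org.apache.hadoop.hbase.client, since setCoprocessorError is package-private, and is not part of the patch.

    import java.io.IOException;
    import org.apache.hadoop.hbase.ipc.ServerRpcController;

    class SetCoprocessorErrorSketch {
      static void demo() throws IOException {
        ServerRpcController controller = new ServerRpcController();
        ConnectionUtils.setCoprocessorError(controller, new IOException("call failed"));
        if (controller.failedOnException()) {
          throw controller.getFailedOn(); // the IOException that was set above
        }
      }
    }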
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java
new file mode 100644
index 0000000..30f6e7e
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import com.google.protobuf.RpcCallback;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * For implementing coprocessor related methods in the {@link Table} and {@link Admin} interfaces.
+ * @deprecated since 3.0.0, will be removed in 4.0.0 along with the coprocessor related methods in
+ *             the {@link Table} and {@link Admin} interfaces.
+ */
+@Deprecated
+@InterfaceAudience.Private
+class CoprocessorBlockingRpcCallback<R> implements RpcCallback<R> {
+  private R result;
+  private boolean resultSet = false;
+
+  /**
+   * Called on completion of the RPC call with the response object, or {@code null} in the case of
+   * an error.
+   * @param parameter the response object or {@code null} if an error occurred
+   */
+  @Override
+  public void run(R parameter) {
+    synchronized (this) {
+      result = parameter;
+      resultSet = true;
+      this.notifyAll();
+    }
+  }
+
+  /**
+   * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
+   * passed. When used asynchronously, this method will block until the {@link #run(Object)} method
+   * has been called.
+   * @return the response object or {@code null} if no response was passed
+   */
+  public synchronized R get() throws IOException {
+    while (!resultSet) {
+      try {
+        this.wait();
+      } catch (InterruptedException ie) {
+        InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
+        exception.initCause(ie);
+        throw exception;
+      }
+    }
+    return result;
+  }
+}
\ No newline at end of file
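A small hedged sketch of what the callback above provides: it turns an asynchronous completion into a blocking get(). The background thread stands in for the RPC layer, and the sketch assumes it lives in the same package because the class is package-private; it is not part of the patch.

    import java.io.IOException;

    class BlockingCallbackSketch {
      public static void main(String[] args) throws IOException {
        CoprocessorBlockingRpcCallback<String> callback = new CoprocessorBlockingRpcCallback<>();
        // The "RPC layer" completes asynchronously by invoking run(...) with the response.
        new Thread(() -> callback.run("pong")).start();
        System.out.println(callback.get()); // blocks until run() has been called
      }
    }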
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index c466e61..9c62678 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1156,10 +1156,10 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public void move(byte[] encodedRegionName) throws IOException {
-    move(encodedRegionName, (ServerName) null);
+    move(encodedRegionName, null);
   }
 
-  public void move(final byte[] encodedRegionName, ServerName destServerName) throws IOException {
+  public void move(byte[] encodedRegionName, ServerName destServerName) throws IOException {
     executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
       @Override
       protected Void rpcCall() throws Exception {
@@ -3910,6 +3910,11 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
+  public List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
+    return getRegionMetrics(serverName, null);
+  }
+
+  @Override
   public Future<Void> createTableAsync(TableDescriptor desc) throws IOException {
     return createTableAsync(desc, null);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index b3d3468..47a7902 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -360,7 +360,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
     this.ng = connection.getNonceGenerator();
   }
 
-  private <T> MasterRequestCallerBuilder<T> newMasterCaller() {
+  <T> MasterRequestCallerBuilder<T> newMasterCaller() {
     return this.connection.callerFactory.<T> masterRequest()
       .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
       .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
@@ -702,11 +702,6 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
-    return isTableAvailable(tableName, Optional.empty());
-  }
-
-  private CompletableFuture<Boolean> isTableAvailable(TableName tableName,
-      Optional<byte[][]> splitKeys) {
     if (TableName.isMetaTableName(tableName)) {
       return connection.registry.getMetaRegionLocation().thenApply(locs -> Stream
         .of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null));
@@ -740,35 +735,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
               future.complete(false);
               return;
             }
-
-            Optional<Boolean> available =
-              splitKeys.map(keys -> compareRegionsWithSplitKeys(locations, keys));
-            future.complete(available.orElse(true));
+            future.complete(true);
           });
       }
     });
     return future;
   }
 
-  private boolean compareRegionsWithSplitKeys(List<HRegionLocation> locations, byte[][] splitKeys) {
-    int regionCount = 0;
-    for (HRegionLocation location : locations) {
-      RegionInfo info = location.getRegion();
-      if (Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-        regionCount++;
-        continue;
-      }
-      for (byte[] splitKey : splitKeys) {
-        // Just check if the splitkey is available
-        if (Bytes.equals(info.getStartKey(), splitKey)) {
-          regionCount++;
-          break;
-        }
-      }
-    }
-    return regionCount == splitKeys.length + 1;
-  }
-
   @Override
   public CompletableFuture<Void> addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) {
     return this.<AddColumnRequest, AddColumnResponse> procedureCall(tableName,
@@ -2004,10 +1977,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
                     LOG.error(
                       "Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName,
                       err3);
-                    future.completeExceptionally(err3);
-                  } else {
-                    future.complete(ret3);
                   }
+                  future.complete(ret3);
                 });
               }
             });
@@ -3393,7 +3364,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       .call();
   }
 
-  private <T> ServerRequestCallerBuilder<T> newServerCaller() {
+  <T> ServerRequestCallerBuilder<T> newServerCaller() {
     return this.connection.callerFactory.<T> serverRequest()
       .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
       .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java
index 3c25c57..b41727f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannelImpl.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import com.google.protobuf.Descriptors.MethodDescriptor;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -101,23 +101,6 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
     return future;
   }
 
-  protected final void setError(RpcController controller, Throwable error) {
-    if (controller == null) {
-      return;
-    }
-    if (controller instanceof ServerRpcController) {
-      if (error instanceof IOException) {
-        ((ServerRpcController) controller).setFailedOn((IOException) error);
-      } else {
-        ((ServerRpcController) controller).setFailedOn(new IOException(error));
-      }
-    } else if (controller instanceof ClientCoprocessorRpcController) {
-      ((ClientCoprocessorRpcController) controller).setFailed(error);
-    } else {
-      controller.setFailed(error.toString());
-    }
-  }
-
   @Override
   public void callMethod(MethodDescriptor method, RpcController controller, Message request,
       Message responsePrototype, RpcCallback<Message> done) {
@@ -128,7 +111,7 @@ class RegionCoprocessorRpcChannelImpl implements RpcChannel {
         .action((c, l, s) -> rpcCall(method, request, responsePrototype, c, l, s)).call(),
       (r, e) -> {
         if (e != null) {
-          setError(controller, e);
+          setCoprocessorError(controller, e);
         }
         done.run(r);
       });
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java
index 6b4419d..2811219 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SyncCoprocessorRpcChannel.java
@@ -36,7 +36,10 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
  * call coprocessor endpoint {@link com.google.protobuf.Service}s.
  * Note that clients should not use this class directly, except through
  * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}.
+ * @deprecated Please stop using this class, as it is too low level and is part of the HBase rpc
+ *             framework. Will be removed in 4.0.0.
  */
+@Deprecated
 @InterfaceAudience.Public
 abstract class SyncCoprocessorRpcChannel implements CoprocessorRpcChannel {
   private static final Logger LOG = LoggerFactory.getLogger(SyncCoprocessorRpcChannel.class);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java
index d581611..30e3062 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.client.ConnectionUtils.setCoprocessorError;
+
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcCallback;
@@ -298,44 +300,7 @@ class TableOverAsyncTable implements Table {
   public void close() {
   }
 
-  private static final class BlockingRpcCallback<R> implements RpcCallback<R> {
-    private R result;
-    private boolean resultSet = false;
-
-    /**
-     * Called on completion of the RPC call with the response object, or {@code null} in the case of
-     * an error.
-     * @param parameter the response object or {@code null} if an error occurred
-     */
-    @Override
-    public void run(R parameter) {
-      synchronized (this) {
-        result = parameter;
-        resultSet = true;
-        this.notifyAll();
-      }
-    }
-
-    /**
-     * Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
-     * passed. When used asynchronously, this method will block until the {@link #run(Object)}
-     * method has been called.
-     * @return the response object or {@code null} if no response was passed
-     */
-    public synchronized R get() throws IOException {
-      while (!resultSet) {
-        try {
-          this.wait();
-        } catch (InterruptedException ie) {
-          InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
-          exception.initCause(ie);
-          throw exception;
-        }
-      }
-      return result;
-    }
-  }
-
+  @SuppressWarnings("deprecation")
   private static final class RegionCoprocessorRpcChannel extends RegionCoprocessorRpcChannelImpl
       implements CoprocessorRpcChannel {
 
@@ -348,17 +313,17 @@ class TableOverAsyncTable implements Table {
     public void callMethod(MethodDescriptor method, RpcController controller, Message request,
         Message responsePrototype, RpcCallback<Message> done) {
       ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
-      BlockingRpcCallback<Message> callback = new BlockingRpcCallback<>();
+      CoprocessorBlockingRpcCallback<Message> callback = new CoprocessorBlockingRpcCallback<>();
       super.callMethod(method, c, request, responsePrototype, callback);
       Message ret;
       try {
         ret = callback.get();
       } catch (IOException e) {
-        setError(controller, e);
+        setCoprocessorError(controller, e);
         return;
       }
       if (c.failed()) {
-        setError(controller, c.getFailed());
+        setCoprocessorError(controller, c.getFailed());
       }
       done.run(ret);
     }
@@ -367,7 +332,7 @@ class TableOverAsyncTable implements Table {
     public Message callBlockingMethod(MethodDescriptor method, RpcController controller,
         Message request, Message responsePrototype) throws ServiceException {
       ClientCoprocessorRpcController c = new ClientCoprocessorRpcController();
-      BlockingRpcCallback<Message> done = new BlockingRpcCallback<>();
+      CoprocessorBlockingRpcCallback<Message> done = new CoprocessorBlockingRpcCallback<>();
       callMethod(method, c, request, responsePrototype, done);
       Message ret;
       try {
@@ -376,7 +341,7 @@ class TableOverAsyncTable implements Table {
         throw new ServiceException(e);
       }
       if (c.failed()) {
-        setError(controller, c.getFailed());
+        setCoprocessorError(controller, c.getFailed());
         throw new ServiceException(c.getFailed());
       }
       return ret;
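
The BlockingRpcCallback inner class removed above is replaced by a CoprocessorBlockingRpcCallback
that is not part of this diff; presumably it is the same wait/notify callback hoisted into a shared
hbase-client class. A condensed sketch, assuming the removed logic is carried over unchanged:

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import com.google.protobuf.RpcCallback;

    // Sketch only: mirrors the removed BlockingRpcCallback, not the actual shared implementation.
    class CoprocessorBlockingRpcCallback<R> implements RpcCallback<R> {
      private R result;
      private boolean resultSet = false;

      @Override
      public synchronized void run(R parameter) {
        result = parameter;   // the response, or null if the call failed
        resultSet = true;
        notifyAll();          // wake the thread blocked in get()
      }

      public synchronized R get() throws IOException {
        while (!resultSet) {
          try {
            wait();           // block until run() has delivered a result
          } catch (InterruptedException ie) {
            InterruptedIOException iioe = new InterruptedIOException(ie.getMessage());
            iioe.initCause(ie);
            throw iioe;
          }
        }
        return result;
      }
    }
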
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
index 953fba7..3c8b04d 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
@@ -57,6 +57,8 @@ public class TestInterfaceAlign {
     adminMethodNames.removeAll(getMethodNames(Abortable.class));
     adminMethodNames.removeAll(getMethodNames(Closeable.class));
 
+    asyncAdminMethodNames.remove("coprocessorService");
+
     adminMethodNames.forEach(method -> {
       boolean contains = asyncAdminMethodNames.contains(method);
       if (method.endsWith("Async")) {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 2dae0e8..7972da0 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -383,7 +383,11 @@ public class PerformanceEvaluation extends Configured implements Tool {
           }
         }
       }
-      admin.createTable(desc, splits);
+      if (splits != null) {
+        admin.createTable(desc, splits);
+      } else {
+        admin.createTable(desc);
+      }
       LOG.info("Table " + desc + " created");
     }
     return admin.tableExists(tableName);
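
The guard added above (and the matching changes to HBaseTestingUtility, TestBulkLoadHFiles and
ThriftHBaseServiceHandler later in this patch) reflects that the Admin implementation backed by
AsyncAdmin no longer accepts a null split-key array. A minimal sketch of the caller-side pattern;
createTableIfMissing is a hypothetical helper name used only for illustration, not an HBase API:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    final class CreateTableHelper {
      // Create the table only if it is missing, pre-splitting it when split keys are supplied.
      static void createTableIfMissing(Admin admin, TableDescriptor desc, byte[][] splits)
          throws IOException {
        if (admin.tableExists(desc.getTableName())) {
          return;
        }
        if (splits != null) {
          admin.createTable(desc, splits); // pre-split table
        } else {
          admin.createTable(desc);         // single region; a null split array is now rejected
        }
      }
    }
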
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 1d7a37c..d5247fb 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -600,8 +600,8 @@ public class TestRemoteTable {
     REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
 
     // Truncate the test table for inserting test scenarios rows keys
-    TEST_UTIL.getHBaseAdmin().disableTable(TABLE);
-    TEST_UTIL.getHBaseAdmin().truncateTable(TABLE, false);
+    TEST_UTIL.getAdmin().disableTable(TABLE);
+    TEST_UTIL.getAdmin().truncateTable(TABLE, false);
 
     remoteTable = new RemoteHTable(
         new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())),
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 58a3f10..afca997 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -74,7 +74,6 @@ import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Hbck;
 import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
 import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
@@ -1590,7 +1589,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       builder.setColumnFamily(cfdb.build());
     }
     TableDescriptor td = builder.build();
-    getAdmin().createTable(td, splitKeys);
+    if (splitKeys != null) {
+      getAdmin().createTable(td, splitKeys);
+    } else {
+      getAdmin().createTable(td);
+    }
     // HBaseAdmin only waits for regions to appear in hbase:meta
     // we should wait until they are assigned
     waitUntilAllRegionsAssigned(td.getTableName());
@@ -1613,7 +1616,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
            .setNewVersionBehavior(true).build());
       }
     }
-    getAdmin().createTable(builder.build(), splitRows);
+    if (splitRows != null) {
+      getAdmin().createTable(builder.build(), splitRows);
+    } else {
+      getAdmin().createTable(builder.build());
+    }
     // HBaseAdmin only waits for regions to appear in hbase:meta
     // we should wait until they are assigned
     waitUntilAllRegionsAssigned(htd.getTableName());
@@ -1682,7 +1689,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       }
       desc.addFamily(hcd);
     }
-    getAdmin().createTable(desc, splitKeys);
+    if (splitKeys != null) {
+      getAdmin().createTable(desc, splitKeys);
+    } else {
+      getAdmin().createTable(desc);
+    }
     // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
     // assigned
     waitUntilAllRegionsAssigned(tableName);
@@ -3031,36 +3042,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   }
 
   /**
-   * Returns a Admin instance.
-   * This instance is shared between HBaseTestingUtility instance users. Closing it has no effect,
-   * it will be closed automatically when the cluster shutdowns
-   *
-   * @return HBaseAdmin instance which is guaranteed to support only {@link Admin} interface.
-   *   Functions in HBaseAdmin not provided by {@link Admin} interface can be changed/deleted
-   *   anytime.
-   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #getAdmin()} instead.
-   */
-  @Deprecated
-  public synchronized HBaseAdmin getHBaseAdmin()
-  throws IOException {
-    if (hbaseAdmin == null){
-      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
-    }
-    return hbaseAdmin;
-  }
-
-  /**
    * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
    * Closing it has no effect, it will be closed automatically when the cluster shutdowns
    */
   public synchronized Admin getAdmin() throws IOException {
     if (hbaseAdmin == null){
-      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
+      this.hbaseAdmin = getConnection().getAdmin();
     }
     return hbaseAdmin;
   }
 
-  private HBaseAdmin hbaseAdmin = null;
+  private Admin hbaseAdmin = null;
 
   /**
    * Returns an {@link Hbck} instance. Needs be closed when done.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index 8e9afed..538917d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
@@ -1080,7 +1081,11 @@ public class TestAdmin1 {
 
       // Split the table
       if (async) {
-        ADMIN.split(tableName, splitPoint);
+        if (splitPoint != null) {
+          ADMIN.split(tableName, splitPoint);
+        } else {
+          ADMIN.split(tableName);
+        }
         final AtomicInteger count = new AtomicInteger(0);
         Thread t = new Thread("CheckForSplit") {
           @Override public void run() {
@@ -1205,7 +1210,8 @@ public class TestAdmin1 {
     // the element at index 1 would be a replica (since the metareader gives us ordered
     // regions). Try splitting that region via the split API . Should fail
     try {
-      TEST_UTIL.getAdmin().splitRegionAsync(regions.get(1).getFirst().getRegionName()).get();
+      FutureUtils.get(
+        TEST_UTIL.getAdmin().splitRegionAsync(regions.get(1).getFirst().getRegionName()));
     } catch (IllegalArgumentException ex) {
       gotException = true;
     }
@@ -1215,9 +1221,9 @@ public class TestAdmin1 {
     // regions). Try splitting that region via a different split API (the difference is
     // this API goes direct to the regionserver skipping any checks in the admin). Should fail
     try {
-      TEST_UTIL.getHBaseAdmin().splitRegionAsync(regions.get(1).getFirst(),
-          new byte[]{(byte)'1'});
-    } catch (IOException ex) {
+      FutureUtils.get(TEST_UTIL.getAdmin().splitRegionAsync(
+        regions.get(1).getFirst().getEncodedNameAsBytes(), new byte[] { (byte) '1' }));
+    } catch (IllegalArgumentException ex) {
       gotException = true;
     }
     assertTrue(gotException);
@@ -1225,8 +1231,8 @@ public class TestAdmin1 {
     gotException = false;
     //testing Sync split operation
     try {
-      TEST_UTIL.getHBaseAdmin().splitRegionSync(regions.get(1).getFirst().getRegionName(),
-          new byte[]{(byte)'1'});
+      FutureUtils.get(TEST_UTIL.getAdmin()
+        .splitRegionAsync(regions.get(1).getFirst().getRegionName(), new byte[] { (byte) '1' }));
     } catch (IllegalArgumentException ex) {
       gotException = true;
     }
@@ -1235,10 +1241,10 @@ public class TestAdmin1 {
     gotException = false;
     // Try merging a replica with another. Should fail.
     try {
-      TEST_UTIL.getHBaseAdmin().mergeRegionsSync(
+      FutureUtils.get(TEST_UTIL.getAdmin().mergeRegionsAsync(
         regions.get(1).getFirst().getEncodedNameAsBytes(),
         regions.get(2).getFirst().getEncodedNameAsBytes(),
-        true);
+        true));
     } catch (IllegalArgumentException m) {
       gotException = true;
     }
@@ -1246,12 +1252,12 @@ public class TestAdmin1 {
     // Try going to the master directly (that will skip the check in admin)
     try {
       byte[][] nameofRegionsToMerge = new byte[2][];
-      nameofRegionsToMerge[0] =  regions.get(1).getFirst().getEncodedNameAsBytes();
+      nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
       nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
       MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
         nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
-      ((ConnectionImplementation) TEST_UTIL.getAdmin().getConnection()).getMaster()
-        .mergeTableRegions(null, request);
+      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().mergeTableRegions(null,
+        request);
     } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException m) {
       Throwable t = m.getCause();
       do {
@@ -1439,24 +1445,24 @@ public class TestAdmin1 {
       List<RegionInfo> tableRegions = ADMIN.getRegions(tableName);
       // 0
       try {
-        ADMIN.mergeRegionsAsync(new byte[0][0], false).get();
+        FutureUtils.get(ADMIN.mergeRegionsAsync(new byte[0][0], false));
         fail();
       } catch (IllegalArgumentException e) {
         // expected
       }
       // 1
       try {
-        ADMIN.mergeRegionsAsync(new byte[][] { tableRegions.get(0).getEncodedNameAsBytes() }, false)
-          .get();
+        FutureUtils.get(ADMIN
+          .mergeRegionsAsync(new byte[][] { tableRegions.get(0).getEncodedNameAsBytes() }, false));
         fail();
       } catch (IllegalArgumentException e) {
         // expected
       }
       // 3
       try {
-        ADMIN.mergeRegionsAsync(
+        FutureUtils.get(ADMIN.mergeRegionsAsync(
           tableRegions.stream().map(RegionInfo::getEncodedNameAsBytes).toArray(byte[][]::new),
-          false).get();
+          false));
         fail();
       } catch (DoNotRetryIOException e) {
         // expected
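
A note on the FutureUtils.get conversions in this test: with Admin now implemented on top of
AsyncAdmin, validation failures for these calls surface through the returned future rather than
being thrown synchronously, and FutureUtils.get is used to unwrap them so the original catch
blocks still see the underlying exception. A minimal sketch of the resulting pattern, in the same
JUnit 4 style as the rest of the class:

    // Sketch of the test pattern used above.
    boolean gotException = false;
    try {
      // Merging an empty region list is invalid; the new Admin reports this through the future.
      FutureUtils.get(ADMIN.mergeRegionsAsync(new byte[0][0], false));
      fail("merging an empty region list should be rejected");
    } catch (IllegalArgumentException e) {
      gotException = true;
    }
    assertTrue(gotException);
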
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 58a8bc5..2644061 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
@@ -59,7 +58,6 @@ import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -211,7 +209,7 @@ public class TestAdmin2 {
       // Use 80 bit numbers to make sure we aren't limited
       byte [] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
       byte [] endKey =   { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
-      Admin hbaseadmin = TEST_UTIL.getHBaseAdmin();
+      Admin hbaseadmin = TEST_UTIL.getAdmin();
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
       htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
       hbaseadmin.createTable(htd, startKey, endKey, expectedRegions);
@@ -391,14 +389,14 @@ public class TestAdmin2 {
       isInList);
   }
 
-  private HBaseAdmin createTable(TableName tableName) throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+  private Admin createTable(TableName tableName) throws IOException {
+    Admin admin = TEST_UTIL.getAdmin();
 
     HTableDescriptor htd = new HTableDescriptor(tableName);
     HColumnDescriptor hcd = new HColumnDescriptor("value");
 
     htd.addFamily(hcd);
-    admin.createTable(htd, null);
+    admin.createTable(htd);
     return admin;
   }
 
@@ -411,7 +409,7 @@ public class TestAdmin2 {
     HColumnDescriptor hcd = new HColumnDescriptor("value");
     htd.addFamily(hcd);
 
-    ADMIN.createTable(htd, null);
+    ADMIN.createTable(htd);
   }
 
   /**
@@ -588,7 +586,7 @@ public class TestAdmin2 {
         new HTableDescriptor(TableName.valueOf(Bytes.toBytes(name.getMethodName())));
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf1"));
     htd.addFamily(hcd);
-    TEST_UTIL.getHBaseAdmin().createTable(htd);
+    TEST_UTIL.getAdmin().createTable(htd);
   }
 
   @Test
@@ -607,27 +605,6 @@ public class TestAdmin2 {
   }
 
   @Test
-  public void testGetRegion() throws Exception {
-    // We use actual HBaseAdmin instance instead of going via Admin interface in
-    // here because makes use of an internal HBA method (TODO: Fix.).
-    HBaseAdmin rawAdmin = TEST_UTIL.getHBaseAdmin();
-
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    LOG.info("Started " + tableName);
-    Table t = TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
-
-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-      HRegionLocation regionLocation = locator.getRegionLocation(Bytes.toBytes("mmm"));
-      RegionInfo region = regionLocation.getRegionInfo();
-      byte[] regionName = region.getRegionName();
-      Pair<RegionInfo, ServerName> pair = rawAdmin.getRegion(regionName);
-      assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
-      pair = rawAdmin.getRegion(region.getEncodedNameAsBytes());
-      assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
-    }
-  }
-
-  @Test
   public void testBalancer() throws Exception {
     boolean initialState = ADMIN.isBalancerEnabled();
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 6d27044..46bd729 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -4249,8 +4249,7 @@ public class TestFromClientSide {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
     try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
-      try (Table t = conn.getTable(tableName);
-           Admin admin = conn.getAdmin()) {
+      try (Table t = conn.getTable(tableName); Admin admin = conn.getAdmin()) {
         assertTrue(admin.tableExists(tableName));
         assertTrue(t.get(new Get(ROW)).isEmpty());
       }
@@ -4269,8 +4268,8 @@ public class TestFromClientSide {
       boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
       try (Admin admin = conn.getAdmin()) {
         assertTrue(admin.tableExists(tableName));
-        assertTrue(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-                .getLiveServerMetrics().size() == SLAVES + (tablesOnMaster ? 1 : 0));
+        assertTrue(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics()
+          .size() == SLAVES + (tablesOnMaster ? 1 : 0));
       }
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index 83becbc..3b3f636 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -272,13 +272,13 @@ public class TestFromClientSide3 {
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3);
 
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10)) {
+    try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 10)) {
       TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS);
       Admin admin = TEST_UTIL.getAdmin();
 
       // Create 3 store files.
       byte[] row = Bytes.toBytes(random.nextInt());
-      performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 100);
+      performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 100);
 
       try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
         // Verify we have multiple store files.
@@ -304,13 +304,13 @@ public class TestFromClientSide3 {
 
         // change the compaction.min config option for this table to 5
         LOG.info("hbase.hstore.compaction.min should now be 5");
-        HTableDescriptor htd = new HTableDescriptor(hTable.getDescriptor());
+        HTableDescriptor htd = new HTableDescriptor(table.getDescriptor());
         htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
         admin.modifyTable(htd);
         LOG.info("alter status finished");
 
         // Create 3 more store files.
-        performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 10);
+        performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 10);
 
         // Issue a compaction request
         admin.compact(tableName);
@@ -357,7 +357,7 @@ public class TestFromClientSide3 {
         htd.modifyFamily(hcd);
         admin.modifyTable(htd);
         LOG.info("alter status finished");
-        assertNull(hTable.getDescriptor().getColumnFamily(FAMILY)
+        assertNull(table.getDescriptor().getColumnFamily(FAMILY)
           .getValue(Bytes.toBytes("hbase.hstore.compaction.min")));
       }
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
index b4cef33..7501867 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java
@@ -50,10 +50,11 @@ public class TestSnapshotDFSTemporaryDirectory
    *
    * @throws Exception on failure
    */
-  @BeforeClass public static void setupCluster() throws Exception {
+  @BeforeClass
+  public static void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniCluster(NUM_RS);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
   }
 
   private static void setupConf(Configuration conf) throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
index a8561d0..a945612 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
@@ -100,10 +100,11 @@ public class TestSnapshotTemporaryDirectory {
    *
    * @throws Exception on failure
    */
-  @BeforeClass public static void setupCluster() throws Exception {
+  @BeforeClass
+  public static void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniCluster(NUM_RS);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
   }
 
   private static void setupConf(Configuration conf) {
@@ -136,7 +137,7 @@ public class TestSnapshotTemporaryDirectory {
 
   @After public void tearDown() throws Exception {
     UTIL.deleteTable(TABLE_NAME);
-    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getAdmin());
     SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
   }
 
@@ -282,8 +283,9 @@ public class TestSnapshotTemporaryDirectory {
    *
    * @throws Exception if snapshot does not complete successfully
    */
-  @Test(timeout = 300000) public void testOfflineTableSnapshot() throws Exception {
-    Admin admin = UTIL.getHBaseAdmin();
+  @Test(timeout = 300000)
+  public void testOfflineTableSnapshot() throws Exception {
+    Admin admin = UTIL.getAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
index 3e40b6f..9a55838 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
@@ -55,17 +55,11 @@ public class TestSplitOrMergeStatus {
   @Rule
   public TestName name = new TestName();
 
-  /**
-   * @throws java.lang.Exception
-   */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniCluster(2);
   }
 
-  /**
-   * @throws java.lang.Exception
-   */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
@@ -84,7 +78,12 @@ public class TestSplitOrMergeStatus {
     initSwitchStatus(admin);
     boolean result = admin.splitSwitch(false, false);
     assertTrue(result);
-    admin.split(t.getName());
+    try {
+      admin.split(t.getName());
+      fail();
+    } catch (IOException e) {
+      // expected
+    }
     int count = admin.getRegions(tableName).size();
     assertTrue(originalCount == count);
     result = admin.splitSwitch(true, false);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 1bc3996..76618a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -1342,7 +1342,7 @@ public class TestMasterObserver {
       List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
 
       admin.mergeRegionsAsync(regions.get(0).getRegionInfo().getEncodedNameAsBytes(),
-        regions.get(1).getRegionInfo().getEncodedNameAsBytes(), true);
+        regions.get(1).getRegionInfo().getEncodedNameAsBytes(), true).get();
       assertTrue("Coprocessor should have been called on region merge",
         cp.wasMergeRegionsCalled());
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 7c396c7..fa23d38 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -190,7 +190,7 @@ public class TestMaster {
     HColumnDescriptor hcd = new HColumnDescriptor("value");
     htd.addFamily(hcd);
 
-    admin.createTable(htd, null);
+    admin.createTable(htd);
     try {
       RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)
           .setStartKey(Bytes.toBytes("A"))
@@ -213,7 +213,7 @@ public class TestMaster {
     HColumnDescriptor hcd = new HColumnDescriptor("value");
     htd.addFamily(hcd);
 
-    admin.createTable(htd, null);
+    admin.createTable(htd);
     try {
       List<RegionInfo> tableRegions = admin.getRegions(tableName);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
index 1f0323e..355eb41 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
@@ -126,7 +126,7 @@ public class TestMasterMetricsWrapper {
       HTableDescriptor desc = new HTableDescriptor(table);
       byte[] FAMILY = Bytes.toBytes("FAMILY");
       desc.addFamily(new HColumnDescriptor(FAMILY));
-      TEST_UTIL.getHBaseAdmin().createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
+      TEST_UTIL.getAdmin().createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
 
       // wait till the table is assigned
       long timeoutTime = System.currentTimeMillis() + 1000;
@@ -148,7 +148,7 @@ public class TestMasterMetricsWrapper {
       assertEquals(5, regionNumberPair.getFirst().intValue());
       assertEquals(0, regionNumberPair.getSecond().intValue());
 
-      TEST_UTIL.getHBaseAdmin().offline(hri.getRegionName());
+      TEST_UTIL.getAdmin().offline(hri.getRegionName());
 
       timeoutTime = System.currentTimeMillis() + 800;
       RegionStates regionStates = master.getAssignmentManager().getRegionStates();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTableRegionsWhileRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTableRegionsWhileRSCrash.java
index 7cf794a..182695c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTableRegionsWhileRSCrash.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTableRegionsWhileRSCrash.java
@@ -69,7 +69,7 @@ public class TestMergeTableRegionsWhileRSCrash {
   @BeforeClass
   public static void setupCluster() throws Exception {
     UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
     byte[][] splitKeys = new byte[1][];
     splitKeys[0] = SPLITKEY;
     TABLE = UTIL.createTable(TABLE_NAME, CF, splitKeys);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitRegionWhileRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitRegionWhileRSCrash.java
index fe5d1a2..a55deb0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitRegionWhileRSCrash.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitRegionWhileRSCrash.java
@@ -66,7 +66,7 @@ public class TestSplitRegionWhileRSCrash {
   @BeforeClass
   public static void setupCluster() throws Exception {
     UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
     TABLE = UTIL.createTable(TABLE_NAME, CF);
     UTIL.waitTableAvailable(TABLE_NAME);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java
index 839d611..5673ed8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java
@@ -106,7 +106,7 @@ public class TestAssignmentOnRSCrash {
       throws Exception {
     final int NROWS = 100;
     int nkilled = 0;
-    for (RegionInfo hri: UTIL.getHBaseAdmin().getRegions(TEST_TABLE)) {
+    for (RegionInfo hri: UTIL.getAdmin().getRegions(TEST_TABLE)) {
       ServerName serverName = AssignmentTestingUtil.getServerHoldingRegion(UTIL, hri);
       if (AssignmentTestingUtil.isServerHoldingMeta(UTIL, serverName)) continue;
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMasterAbortWhileMergingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMasterAbortWhileMergingTable.java
index 1af9bd0..6fcdb39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMasterAbortWhileMergingTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMasterAbortWhileMergingTable.java
@@ -68,7 +68,7 @@ public class TestMasterAbortWhileMergingTable {
     UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
         MergeRegionObserver.class.getName());
     UTIL.startMiniCluster(3);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
     byte[][] splitKeys = new byte[1][];
     splitKeys[0] = SPLITKEY;
     UTIL.createTable(TABLE_NAME, CF, splitKeys);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestModifyTableWhileMerging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestModifyTableWhileMerging.java
index 16ad373..0d77608 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestModifyTableWhileMerging.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestModifyTableWhileMerging.java
@@ -67,7 +67,7 @@ public class TestModifyTableWhileMerging {
     //Set procedure executor thread to 1, making reproducing this issue of HBASE-20921 easier
     UTIL.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
     UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
+    admin = UTIL.getAdmin();
     byte[][] splitKeys = new byte[1][];
     splitKeys[0] = SPLITKEY;
     client = UTIL.createTable(TABLE_NAME, CF, splitKeys);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java
index 6ae68f8..0bf4e78 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java
@@ -24,13 +24,12 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.util.Collection;
-
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -40,7 +39,6 @@ import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -78,7 +76,7 @@ public class TestCleanupCompactedFileOnRegionClose {
     byte[] familyNameBytes = Bytes.toBytes(familyName);
     util.createTable(tableName, familyName);
 
-    HBaseAdmin hBaseAdmin = util.getHBaseAdmin();
+    Admin hBaseAdmin = util.getAdmin();
     Table table = util.getConnection().getTable(tableName);
 
     HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 8fa7f44..0ccda0d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -102,10 +102,9 @@ public class TestEndToEndSplitTransaction {
   }
 
 
-  /*
+  /**
    * This is the test for : HBASE-20940 This test will split the region and try to open an reference
    * over store file. Once store file has any reference, it makes sure that region can't be split
-   * @throws Exception
    */
   @Test
   public void testCanSplitJustAfterASplit() throws Exception {
@@ -125,7 +124,7 @@ public class TestEndToEndSplitTransaction {
       TEST_UTIL.loadTable(source, fam);
       List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
       regions.get(0).forceSplit(null);
-      admin.split(tableName);
+      TEST_UTIL.getAsyncConnection().getAdmin().split(tableName);
 
       while (regions.size() <= 1) {
         regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
@@ -325,7 +324,7 @@ public class TestEndToEndSplitTransaction {
 
           Set<RegionInfo> regions = new TreeSet<>(RegionInfo.COMPARATOR);
           for (HRegionLocation loc : rl.getAllRegionLocations()) {
-            regions.add(loc.getRegionInfo());
+            regions.add(loc.getRegion());
           }
           verifyTableRegions(regions);
         }
@@ -504,7 +503,7 @@ public class TestEndToEndSplitTransaction {
     long start = System.currentTimeMillis();
     while (System.currentTimeMillis() - start < timeout) {
       HRegionLocation loc = MetaTableAccessor.getRegionLocation(conn, hri);
-      if (loc != null && !loc.getRegionInfo().isOffline()) {
+      if (loc != null && !loc.getRegion().isOffline()) {
         log("found region in META: " + hri.getRegionNameAsString());
         break;
       }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java
index 3c3dadf..805decf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java
@@ -82,7 +82,7 @@ public class TestNewVersionBehaviorFromClientSide {
     fam.setNewVersionBehavior(true);
     fam.setMaxVersions(3);
     table.addFamily(fam);
-    TEST_UTIL.getHBaseAdmin().createTable(table);
+    TEST_UTIL.getAdmin().createTable(table);
     return TEST_UTIL.getConnection().getTable(tableName);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java
index 878ca75..2b8953e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java
@@ -162,7 +162,7 @@ public class TestRegionServerAbort {
    */
   @Test
   public void testStopOverrideFromCoprocessor() throws Exception {
-    Admin admin = testUtil.getHBaseAdmin();
+    Admin admin = testUtil.getAdmin();
     HRegionServer regionserver = cluster.getRegionServer(0);
     admin.stopRegionServer(regionserver.getServerName().getHostAndPort());
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index 24329a0..387aa98 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -167,7 +167,7 @@ public class TestReplicator extends TestReplicationBase {
   }
 
   private void truncateTable(HBaseTestingUtility util, TableName tablename) throws IOException {
-    HBaseAdmin admin = util.getHBaseAdmin();
+    Admin admin = util.getAdmin();
     admin.disableTable(tableName);
     admin.truncateTable(tablename, false);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index d39c0e6..f9ca754 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import com.google.protobuf.ServiceException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -74,7 +73,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 
@@ -270,33 +268,6 @@ public final class SnapshotTestingUtils {
     }
   }
 
-  /**
-   * Helper method for testing async snapshot operations. Just waits for the
-   * given snapshot to complete on the server by repeatedly checking the master.
-   *
-   * @param master the master running the snapshot
-   * @param snapshot the snapshot to check
-   * @param sleep amount to sleep between checks to see if the snapshot is done
-   * @throws ServiceException if the snapshot fails
-   * @throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
-   */
-  public static void waitForSnapshotToComplete(HMaster master,
-      SnapshotProtos.SnapshotDescription snapshot, long sleep)
-          throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
-    final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
-        .setSnapshot(snapshot).build();
-    IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder()
-        .buildPartial();
-    while (!done.getDone()) {
-      done = master.getMasterRpcServices().isSnapshotDone(null, request);
-      try {
-        Thread.sleep(sleep);
-      } catch (InterruptedException e) {
-        throw new org.apache.hbase.thirdparty.com.google.protobuf.ServiceException(e);
-      }
-    }
-  }
-
   /*
    * Take snapshot with maximum of numTries attempts, ignoring CorruptedSnapshotException
    * except for the last CorruptedSnapshotException
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
index e24d445..1d81dd7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
@@ -28,6 +28,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -57,7 +59,11 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
 
 /**
@@ -273,6 +279,38 @@ public class TestFlushSnapshotFromClient {
     }
   }
 
+  /**
+   * Helper method for testing async snapshot operations. Just waits for the given snapshot to
+   * complete on the server by repeatedly checking the master.
+   * @param master the master running the snapshot
+   * @param snapshot the snapshot to check
+   * @param timeoutNanos maximum time to wait, in nanoseconds, for the snapshot to complete
+   */
+  private static void waitForSnapshotToComplete(HMaster master,
+      SnapshotProtos.SnapshotDescription snapshot, long timeoutNanos) throws Exception {
+    final IsSnapshotDoneRequest request =
+      IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build();
+    long start = System.nanoTime();
+    while (System.nanoTime() - start < timeoutNanos) {
+      try {
+        IsSnapshotDoneResponse done = master.getMasterRpcServices().isSnapshotDone(null, request);
+        if (done.getDone()) {
+          return;
+        }
+      } catch (ServiceException e) {
+        // Ignore UnknownSnapshotException: for AsyncAdmin the snapshot call returns as soon as
+        // the request has been sent out, so the master may not have registered the snapshot yet
+        // when we start polling.
+        if (!(e.getCause() instanceof UnknownSnapshotException)) {
+          throw e;
+        }
+      }
+
+      Thread.sleep(200);
+    }
+    throw new TimeoutException("Timeout waiting for snapshot " + snapshot + " to complete");
+  }
+
   @Test
   public void testAsyncFlushSnapshot() throws Exception {
     SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder()
@@ -285,7 +323,7 @@ public class TestFlushSnapshotFromClient {
 
     // constantly loop, looking for the snapshot to complete
     HMaster master = UTIL.getMiniHBaseCluster().getMaster();
-    SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
+    waitForSnapshotToComplete(master, snapshot, TimeUnit.MINUTES.toNanos(1));
     LOG.info(" === Async Snapshot Completed ===");
     UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
@@ -524,7 +562,6 @@ public class TestFlushSnapshotFromClient {
     SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
   }
 
-
   protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
       long expectedRows) throws IOException {
     SnapshotTestingUtils.verifyRowCount(util, tableName, expectedRows);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java
index e85fc1a..5122464 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java
@@ -335,7 +335,11 @@ public class TestBulkLoadHFiles {
 
     TableName tableName = htd.getTableName();
     if (!util.getAdmin().tableExists(tableName) && (preCreateTable || map != null)) {
-      util.getAdmin().createTable(htd, tableSplitKeys);
+      if (tableSplitKeys != null) {
+        util.getAdmin().createTable(htd, tableSplitKeys);
+      } else {
+        util.getAdmin().createTable(htd);
+      }
     }
 
     Configuration conf = util.getConfiguration();
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
index 565a9c7..2ebbb11 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
@@ -626,7 +626,11 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements TH
     try {
       TableDescriptor descriptor = tableDescriptorFromThrift(desc);
       byte[][] split = splitKeyFromThrift(splitKeys);
-      connectionCache.getAdmin().createTable(descriptor, split);
+      if (split != null) {
+        connectionCache.getAdmin().createTable(descriptor, split);
+      } else {
+        connectionCache.getAdmin().createTable(descriptor);
+      }
     } catch (IOException e) {
       throw getTIOError(e);
     }
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
index 1884fb0..d7aea33 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -720,6 +720,11 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
+  public List<RegionMetrics> getRegionMetrics(ServerName serverName) {
+    throw new NotImplementedException("getRegionMetrics not supported in ThriftAdmin");
+  }
+
+  @Override
   public List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName) {
     throw new NotImplementedException("getRegionMetrics not supported in ThriftAdmin");
   }
@@ -1127,8 +1132,8 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
-  public List<UserPermission>
-      getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) {
+  public List<UserPermission> getUserPermissions(
+      GetUserPermissionsRequest getUserPermissionsRequest) {
     throw new NotImplementedException("getUserPermissions not supported in ThriftAdmin");
   }
 


[hbase] 01/27: HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 3d7c2bc613216688ce5cfb0f006b91c6b88c2c7b
Author: zhangduo <zh...@apache.org>
AuthorDate: Fri Nov 30 08:23:47 2018 +0800

    HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer
---
 .../hbase/client/AsyncClusterConnection.java       | 38 +++++++++++++
 .../hadoop/hbase/client/AsyncConnectionImpl.java   | 14 +++--
 .../hbase/client/ClusterConnectionFactory.java     | 63 ++++++++++++++++++++++
 .../hadoop/hbase/client/ConnectionFactory.java     |  2 +-
 .../hbase/client/TestAsyncAdminRpcPriority.java    |  2 +-
 .../hbase/client/TestAsyncTableRpcPriority.java    |  2 +-
 .../apache/hadoop/hbase/util/ReflectionUtils.java  | 22 ++++----
 .../main/java/org/apache/hadoop/hbase/Server.java  | 20 +++++++
 .../org/apache/hadoop/hbase/master/HMaster.java    |  3 ++
 .../hadoop/hbase/regionserver/HRegionServer.java   | 56 +++++++++++++------
 .../regionserver/ReplicationSyncUp.java            |  6 +++
 .../hadoop/hbase/MockRegionServerServices.java     |  6 +++
 .../client/TestAsyncNonMetaRegionLocator.java      |  2 +-
 ...stAsyncNonMetaRegionLocatorConcurrenyLimit.java |  2 +-
 .../hbase/client/TestAsyncRegionLocator.java       |  2 +-
 .../TestAsyncSingleRequestRpcRetryingCaller.java   |  4 +-
 .../hbase/master/MockNoopMasterServices.java       |  6 +++
 .../hadoop/hbase/master/MockRegionServer.java      |  6 +++
 .../hbase/master/TestActiveMasterManager.java      |  6 +++
 .../hbase/master/cleaner/TestHFileCleaner.java     |  6 +++
 .../hbase/master/cleaner/TestHFileLinkCleaner.java |  6 +++
 .../hbase/master/cleaner/TestLogsCleaner.java      |  6 +++
 .../cleaner/TestReplicationHFileCleaner.java       |  6 +++
 .../hbase/regionserver/TestHeapMemoryManager.java  |  6 +++
 .../hbase/regionserver/TestSplitLogWorker.java     |  6 +++
 .../hadoop/hbase/regionserver/TestWALLockup.java   |  6 +++
 .../replication/TestReplicationTrackerZKImpl.java  |  6 +++
 .../regionserver/TestReplicationSourceManager.java |  6 +++
 .../security/token/TestTokenAuthentication.java    |  6 +++
 .../org/apache/hadoop/hbase/util/MockServer.java   |  6 +++
 30 files changed, 292 insertions(+), 36 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
new file mode 100644
index 0000000..c7dea25
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The asynchronous connection for internal usage.
+ */
+@InterfaceAudience.Private
+public interface AsyncClusterConnection extends AsyncConnection {
+
+  /**
+   * Get the nonce generator for this connection.
+   */
+  NonceGenerator getNonceGenerator();
+
+  /**
+   * Get the rpc client we used to communicate with other servers.
+   */
+  RpcClient getRpcClient();
+}
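
To make the intent of the new interface concrete, a hedged usage sketch follows; it leans on the
ClusterConnectionFactory.createAsyncClusterConnection method added later in this commit, and the
null local bind address and the example class name are purely illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncClusterConnection;
    import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
    import org.apache.hadoop.hbase.client.NonceGenerator;
    import org.apache.hadoop.hbase.ipc.RpcClient;
    import org.apache.hadoop.hbase.security.User;

    public final class AsyncClusterConnectionExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Blocks while fetching the cluster id, as described in the factory javadoc below.
        AsyncClusterConnection conn = ClusterConnectionFactory.createAsyncClusterConnection(
            conf, null /* no specific local bind address */, User.getCurrent());
        NonceGenerator nonceGenerator = conn.getNonceGenerator(); // per-connection nonce source
        RpcClient rpcClient = conn.getRpcClient();                // low-level RPC access
        System.out.println("nonces: " + nonceGenerator + ", rpc client: " + rpcClient);
        conn.close();                                             // AsyncConnection is Closeable
      }
    }
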
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index f58dfba..4a5d0c3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -28,6 +28,7 @@ import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLE
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import java.io.IOException;
+import java.net.SocketAddress;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
@@ -67,7 +68,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterServ
  * The implementation of AsyncConnection.
  */
 @InterfaceAudience.Private
-class AsyncConnectionImpl implements AsyncConnection {
+class AsyncConnectionImpl implements AsyncClusterConnection {
 
   private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionImpl.class);
 
@@ -119,7 +120,7 @@ class AsyncConnectionImpl implements AsyncConnection {
   private final ClusterStatusListener clusterStatusListener;
 
   public AsyncConnectionImpl(Configuration conf, AsyncRegistry registry, String clusterId,
-      User user) {
+      SocketAddress localAddress, User user) {
     this.conf = conf;
     this.user = user;
     if (user.isLoginFromKeytab()) {
@@ -132,7 +133,7 @@ class AsyncConnectionImpl implements AsyncConnection {
     } else {
       this.metrics = Optional.empty();
     }
-    this.rpcClient = RpcClientFactory.createClient(conf, clusterId, metrics.orElse(null));
+    this.rpcClient = RpcClientFactory.createClient(conf, clusterId, localAddress, metrics.orElse(null));
     this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);
     this.hostnameCanChange = conf.getBoolean(RESOLVE_HOSTNAME_ON_FAIL_KEY, true);
     this.rpcTimeout =
@@ -216,11 +217,16 @@ class AsyncConnectionImpl implements AsyncConnection {
   }
 
   // ditto
-  @VisibleForTesting
+  @Override
   public NonceGenerator getNonceGenerator() {
     return nonceGenerator;
   }
 
+  @Override
+  public RpcClient getRpcClient() {
+    return rpcClient;
+  }
+
   private ClientService.Interface createRegionServerStub(ServerName serverName) throws IOException {
     return ClientService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout));
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
new file mode 100644
index 0000000..68c0630
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.SocketAddress;
+import java.util.concurrent.ExecutionException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+
+/**
+ * The factory for creating {@link AsyncClusterConnection}.
+ */
+@InterfaceAudience.Private
+public final class ClusterConnectionFactory {
+
+  private ClusterConnectionFactory() {
+  }
+
+  /**
+   * Create a new {@link AsyncClusterConnection} instance.
+   * <p/>
+   * Unlike what we have done in {@link ConnectionFactory}, here we just return an
+   * {@link AsyncClusterConnection} instead of a {@link java.util.concurrent.CompletableFuture},
+   * which means this method could block on fetching the cluster id. This is just used to simplify
+   * the implementation, as when starting new region servers, we do not need to be event-driven. Can
+   * change later if we want a {@link java.util.concurrent.CompletableFuture} here.
+   */
+  public static AsyncClusterConnection createAsyncClusterConnection(Configuration conf,
+      SocketAddress localAddress, User user) throws IOException {
+    AsyncRegistry registry = AsyncRegistryFactory.getRegistry(conf);
+    String clusterId;
+    try {
+      clusterId = registry.getClusterId().get();
+    } catch (InterruptedException e) {
+      throw (IOException) new InterruptedIOException().initCause(e);
+    } catch (ExecutionException e) {
+      Throwable cause = e.getCause();
+      Throwables.propagateIfPossible(cause, IOException.class);
+      throw new IOException(cause);
+    }
+    return new AsyncConnectionImpl(conf, registry, clusterId, localAddress, user);
+  }
+}
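
A usage sketch for the factory above, assuming hbase-client is on the classpath; note that, unlike ConnectionFactory, the call may block while fetching the cluster id. The local address value and the standalone main class are illustrative only:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncClusterConnection;
    import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
    import org.apache.hadoop.hbase.security.User;

    public final class ClusterConnectionFactoryUsage {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Bind outgoing RPCs to a local address, as HRegionServer does; port 0 means any free port.
        InetSocketAddress localAddress = new InetSocketAddress("127.0.0.1", 0);
        AsyncClusterConnection conn =
            ClusterConnectionFactory.createAsyncClusterConnection(conf, localAddress, User.getCurrent());
        try {
          System.out.println("created " + conn);
        } finally {
          conn.close();
        }
      }
    }
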
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index b36485f..b984a99 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -298,7 +298,7 @@ public class ConnectionFactory {
       try {
         future.complete(
           user.runAs((PrivilegedExceptionAction<? extends AsyncConnection>) () -> ReflectionUtils
-            .newInstance(clazz, conf, registry, clusterId, user)));
+            .newInstance(clazz, conf, registry, clusterId, null, user)));
       } catch (Exception e) {
         registry.close();
         future.completeExceptionally(e);
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
index 46d786e..d4db7a4 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
@@ -142,7 +142,7 @@ public class TestAsyncAdminRpcPriority {
     }).when(adminStub).stopServer(any(HBaseRpcController.class), any(StopServerRequest.class),
       any());
 
-    conn = new AsyncConnectionImpl(CONF, new DoNothingAsyncRegistry(CONF), "test",
+    conn = new AsyncConnectionImpl(CONF, new DoNothingAsyncRegistry(CONF), "test", null,
       UserProvider.instantiate(CONF).getCurrent()) {
 
       @Override
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
index 56dcf10..15429cb 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
@@ -175,7 +175,7 @@ public class TestAsyncTableRpcPriority {
         return null;
       }
     }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any());
-    conn = new AsyncConnectionImpl(CONF, new DoNothingAsyncRegistry(CONF), "test",
+    conn = new AsyncConnectionImpl(CONF, new DoNothingAsyncRegistry(CONF), "test", null,
       UserProvider.instantiate(CONF).getCurrent()) {
 
       @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
index a136846..268249d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
@@ -83,15 +83,19 @@ public class ReflectionUtils {
 
       boolean match = true;
       for (int i = 0; i < ctorParamTypes.length && match; ++i) {
-        Class<?> paramType = paramTypes[i].getClass();
-        match = (!ctorParamTypes[i].isPrimitive()) ? ctorParamTypes[i].isAssignableFrom(paramType) :
-                  ((int.class.equals(ctorParamTypes[i]) && Integer.class.equals(paramType)) ||
-                   (long.class.equals(ctorParamTypes[i]) && Long.class.equals(paramType)) ||
-                   (double.class.equals(ctorParamTypes[i]) && Double.class.equals(paramType)) ||
-                   (char.class.equals(ctorParamTypes[i]) && Character.class.equals(paramType)) ||
-                   (short.class.equals(ctorParamTypes[i]) && Short.class.equals(paramType)) ||
-                   (boolean.class.equals(ctorParamTypes[i]) && Boolean.class.equals(paramType)) ||
-                   (byte.class.equals(ctorParamTypes[i]) && Byte.class.equals(paramType)));
+        if (paramTypes[i] == null) {
+          match = !ctorParamTypes[i].isPrimitive();
+        } else {
+          Class<?> paramType = paramTypes[i].getClass();
+          match = (!ctorParamTypes[i].isPrimitive()) ? ctorParamTypes[i].isAssignableFrom(paramType)
+            : ((int.class.equals(ctorParamTypes[i]) && Integer.class.equals(paramType)) ||
+              (long.class.equals(ctorParamTypes[i]) && Long.class.equals(paramType)) ||
+              (double.class.equals(ctorParamTypes[i]) && Double.class.equals(paramType)) ||
+              (char.class.equals(ctorParamTypes[i]) && Character.class.equals(paramType)) ||
+              (short.class.equals(ctorParamTypes[i]) && Short.class.equals(paramType)) ||
+              (boolean.class.equals(ctorParamTypes[i]) && Boolean.class.equals(paramType)) ||
+              (byte.class.equals(ctorParamTypes[i]) && Byte.class.equals(paramType)));
+        }
       }
 
       if (match) {
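
The extra null check above is needed because ConnectionFactory (earlier in this patch) now passes null for the new SocketAddress parameter when reflectively constructing AsyncConnectionImpl. A small sketch of the resulting behaviour, using a purely illustrative Widget class:

    import java.net.SocketAddress;
    import org.apache.hadoop.hbase.util.ReflectionUtils;

    public final class ReflectionUtilsNullArgDemo {
      // Illustrative class only; a null localAddress is a legitimate argument.
      public static class Widget {
        public Widget(String name, SocketAddress localAddress) {
        }
      }

      public static void main(String[] args) {
        // Before this change, paramTypes[i].getClass() threw NullPointerException for a null
        // argument; now a null argument simply matches any non-primitive constructor parameter.
        Widget w = ReflectionUtils.newInstance(Widget.class, "demo", null);
        System.out.println(w);
      }
    }
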
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index fb898ea..c33d5af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -61,6 +63,24 @@ public interface Server extends Abortable, Stoppable {
   ClusterConnection getClusterConnection();
 
   /**
+   * Returns a reference to the server's async connection.
+   * <p/>
+   * Important note: this method returns a reference to a Connection which is managed by the
+   * Server itself, so callers must NOT attempt to close the connection obtained.
+   */
+  default AsyncConnection getAsyncConnection() {
+    return getAsyncClusterConnection();
+  }
+
+  /**
+   * Returns a reference to the server's async cluster connection.
+   * <p/>
+   * Important note: this method returns a reference to a Connection which is managed by the
+   * Server itself, so callers must NOT attempt to close the connection obtained.
+   */
+  AsyncClusterConnection getAsyncClusterConnection();
+
+  /**
    * @return The unique server name for this server.
    */
   ServerName getServerName();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 137b558..45c2548 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3049,6 +3049,9 @@ public class HMaster extends HRegionServer implements MasterServices {
     if (this.clusterConnection != null) {
       this.clusterConnection.close();
     }
+    if (this.asyncClusterConnection != null) {
+      this.asyncClusterConnection.close();
+    }
   }
 
   public void stopMaster() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 6dc5ada..b0b8b90 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -83,7 +83,9 @@ import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.ZNodeClearer;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -110,7 +112,6 @@ import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper;
 import org.apache.hadoop.hbase.ipc.RpcClient;
-import org.apache.hadoop.hbase.ipc.RpcClientFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
@@ -286,6 +287,11 @@ public class HRegionServer extends HasThread implements
   protected ClusterConnection clusterConnection;
 
   /**
+   * The asynchronous cluster connection to be shared by services.
+   */
+  protected AsyncClusterConnection asyncClusterConnection;
+
+  /**
    * Go here to get table descriptors.
    */
   protected TableDescriptors tableDescriptors;
@@ -808,11 +814,7 @@ public class HRegionServer extends HasThread implements
     return true;
   }
 
-  /**
-   * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
-   * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
-   */
-  private ClusterConnection createClusterConnection() throws IOException {
+  private Configuration unsetClientZookeeperQuorum() {
     Configuration conf = this.conf;
     if (conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) != null) {
       // Use server ZK cluster for server-issued connections, so we clone
@@ -820,11 +822,20 @@ public class HRegionServer extends HasThread implements
       conf = new Configuration(this.conf);
       conf.unset(HConstants.CLIENT_ZOOKEEPER_QUORUM);
     }
+    return conf;
+  }
+
+  /**
+   * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
+   * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
+   */
+  private ClusterConnection createClusterConnection() throws IOException {
     // Create a cluster connection that when appropriate, can short-circuit and go directly to the
     // local server if the request is to the local server bypassing RPC. Can be used for both local
     // and remote invocations.
-    ClusterConnection conn = ConnectionUtils.createShortCircuitConnection(conf, null,
-      userProvider.getCurrent(), serverName, rpcServices, rpcServices);
+    ClusterConnection conn =
+      ConnectionUtils.createShortCircuitConnection(unsetClientZookeeperQuorum(), null,
+        userProvider.getCurrent(), serverName, rpcServices, rpcServices);
     // This is used to initialize the batch thread pool inside the connection implementation.
     // When deploy a fresh cluster, we may first use the cluster connection in InitMetaProcedure,
     // which will be executed inside the PEWorker, and then the batch thread pool will inherit the
@@ -858,9 +869,12 @@ public class HRegionServer extends HasThread implements
   /**
    * Setup our cluster connection if not already initialized.
    */
-  protected synchronized void setupClusterConnection() throws IOException {
+  protected final synchronized void setupClusterConnection() throws IOException {
     if (clusterConnection == null) {
       clusterConnection = createClusterConnection();
+      asyncClusterConnection =
+        ClusterConnectionFactory.createAsyncClusterConnection(unsetClientZookeeperQuorum(),
+          new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), userProvider.getCurrent());
     }
   }
 
@@ -874,8 +888,7 @@ public class HRegionServer extends HasThread implements
       initializeZooKeeper();
       setupClusterConnection();
       // Setup RPC client for master communication
-      this.rpcClient = RpcClientFactory.createClient(conf, clusterId, new InetSocketAddress(
-          this.rpcServices.isa.getAddress(), 0), clusterConnection.getConnectionMetrics());
+      this.rpcClient = asyncClusterConnection.getRpcClient();
     } catch (Throwable t) {
       // Call stop if error or process will stick around for ever since server
       // puts up non-daemon threads.
@@ -1124,7 +1137,15 @@ public class HRegionServer extends HasThread implements
         LOG.warn("Attempt to close server's short circuit ClusterConnection failed.", e);
       }
     }
-
+    if (this.asyncClusterConnection != null) {
+      try {
+        this.asyncClusterConnection.close();
+      } catch (IOException e) {
+        // Although the {@link Closeable} interface throws an {@link
+        // IOException}, in reality, the implementation would never do that.
+        LOG.warn("Attempt to close server's AsyncClusterConnection failed.", e);
+      }
+    }
     // Closing the compactSplit thread before closing meta regions
     if (!this.killed && containsMetaTableRegions()) {
       if (!abortRequested || this.fsOk) {
@@ -3809,9 +3830,9 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public EntityLock regionLock(List<RegionInfo> regionInfos, String description,
-      Abortable abort) throws IOException {
-    return new LockServiceClient(conf, lockStub, clusterConnection.getNonceGenerator())
+  public EntityLock regionLock(List<RegionInfo> regionInfos, String description, Abortable abort)
+      throws IOException {
+    return new LockServiceClient(conf, lockStub, asyncClusterConnection.getNonceGenerator())
       .regionLock(regionInfos, description, abort);
   }
 
@@ -3963,4 +3984,9 @@ public class HRegionServer extends HasThread implements
       Runtime.getRuntime().halt(1);
     }
   }
+
+  @Override
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return asyncClusterConnection;
+  }
 }
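
One behavioural note on the refactor above: the client ZK quorum is stripped from a cloned Configuration before either connection is created, so both the short-circuit connection and the new async cluster connection talk to the server-side ZooKeeper ensemble. A small sketch of that clone-and-unset pattern, with a placeholder quorum value:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public final class UnsetClientQuorumDemo {
      // Mirrors the factored-out unsetClientZookeeperQuorum(): server-issued connections should
      // use the server ZK ensemble, so the client-only quorum is dropped from a copy of the conf.
      static Configuration serverSideConf(Configuration base) {
        if (base.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) == null) {
          return base;
        }
        Configuration copy = new Configuration(base);
        copy.unset(HConstants.CLIENT_ZOOKEEPER_QUORUM);
        return copy;
      }

      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.CLIENT_ZOOKEEPER_QUORUM, "client-zk.example.org"); // placeholder value
        System.out.println(serverSideConf(conf).get(HConstants.CLIENT_ZOOKEEPER_QUORUM)); // prints null
      }
    }
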
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index c7bccb3..7d1245c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -180,5 +181,10 @@ public class ReplicationSyncUp extends Configured implements Tool {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index f5e2793..ade9cde 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -380,4 +381,9 @@ public class MockRegionServerServices implements RegionServerServices {
   public ZKPermissionWatcher getZKPermissionWatcher() {
     return null;
   }
+
+  @Override
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
index 042e067..559e4a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
@@ -81,7 +81,7 @@ public class TestAsyncNonMetaRegionLocator {
     TEST_UTIL.getAdmin().balancerSwitch(false, true);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-      registry.getClusterId().get(), User.getCurrent());
+      registry.getClusterId().get(), null, User.getCurrent());
     LOCATOR = new AsyncNonMetaRegionLocator(CONN);
     SPLIT_KEYS = new byte[8][];
     for (int i = 111; i < 999; i += 111) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
index 8cdb4a9..7e06218 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
@@ -125,7 +125,7 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
     TEST_UTIL.getAdmin().balancerSwitch(false, true);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-      registry.getClusterId().get(), User.getCurrent());
+      registry.getClusterId().get(), null, User.getCurrent());
     LOCATOR = new AsyncNonMetaRegionLocator(CONN);
     SPLIT_KEYS = IntStream.range(1, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
       .toArray(byte[][]::new);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
index 0a94def..a7cf322 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
@@ -100,7 +100,7 @@ public class TestAsyncRegionLocator {
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-      registry.getClusterId().get(), User.getCurrent());
+      registry.getClusterId().get(), null, User.getCurrent());
     LOCATOR = CONN.getLocator();
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
index 8959c1d..b6e5362 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
@@ -73,7 +73,7 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
     CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-      registry.getClusterId().get(), User.getCurrent());
+      registry.getClusterId().get(), null, User.getCurrent());
   }
 
   @AfterClass
@@ -163,7 +163,7 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
         }
       };
     try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(),
-      CONN.registry, CONN.registry.getClusterId().get(), User.getCurrent()) {
+      CONN.registry, CONN.registry.getClusterId().get(), null, User.getCurrent()) {
 
       @Override
       AsyncRegionLocator getLocator() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 35d53c5..6aa0d5a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
@@ -485,4 +486,9 @@ public class MockNoopMasterServices implements MasterServices {
   public ZKPermissionWatcher getZKPermissionWatcher() {
     return null;
   }
+
+  @Override
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return null;
+  }
 }
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 2afb456..d7a46eb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -733,4 +734,9 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
   public ZKPermissionWatcher getZKPermissionWatcher() {
     return null;
   }
+
+  @Override
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 2300f54..77667a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -349,5 +350,10 @@ public class TestActiveMasterManager {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index 5c8db3e..c5fad32 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -279,6 +280,11 @@ public class TestHFileCleaner {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   @Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index 119194b..fd11ff8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.HFileLink;
@@ -213,5 +214,10 @@ public class TestHFileLinkCleaner {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index 4d254a0..6a5fe9c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -411,6 +412,11 @@ public class TestLogsCleaner {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   static class FaultyZooKeeperWatcher extends ZKWatcher {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index d162bf3..9791643 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -303,6 +304,11 @@ public class TestReplicationHFileCleaner {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   static class FaultyZooKeeperWatcher extends ZKWatcher {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 8c9ce75..4a359e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -862,6 +863,11 @@ public class TestHeapMemoryManager {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   static class CustomHeapMemoryTuner implements HeapMemoryTuner {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index 14dc619..43da846 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
@@ -160,6 +161,11 @@ public class TestSplitLogWorker {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   private void waitForCounter(LongAdder ctr, long oldval, long newval, long timems)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index 0e20252..9e9d1d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Durability;
@@ -523,6 +524,11 @@ public class TestWALLockup {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   static class DummyWALActionsListener implements WALActionsListener {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 863d558..62ab265 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -263,5 +264,10 @@ public class TestReplicationTrackerZKImpl {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 86bbb09..427f319 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -906,5 +907,10 @@ public abstract class TestReplicationSourceManager {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index e4780f1..92c8e54 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -363,6 +364,11 @@ public class TestTokenAuthentication {
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
+
+    @Override
+    public AsyncClusterConnection getAsyncClusterConnection() {
+      return null;
+    }
   }
 
   @Parameters(name = "{index}: rpcServerImpl={0}")
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index c25db01..13212d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.log.HBaseMarkers;
@@ -143,4 +144,9 @@ public class MockServer implements Server {
   public Connection createConnection(Configuration conf) throws IOException {
     return null;
   }
+
+  @Override
+  public AsyncClusterConnection getAsyncClusterConnection() {
+    return null;
+  }
 }


[hbase] 23/27: HBASE-22036 Rewrite TestScannerHeartbeatMessages

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d0b3a67304af71d879cdd538c4dfe28e58314350
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Apr 25 18:18:58 2019 +0800

    HBASE-22036 Rewrite TestScannerHeartbeatMessages
---
 .../hbase/client/ScanPerNextResultScanner.java     | 147 +++++++++++++++++++++
 .../regionserver/TestScannerHeartbeatMessages.java |  71 +++++-----
 2 files changed, 187 insertions(+), 31 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ScanPerNextResultScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ScanPerNextResultScanner.java
new file mode 100644
index 0000000..c8665e9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ScanPerNextResultScanner.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayDeque;
+import java.util.Queue;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+
+/**
+ * A ResultScanner which only sends a request to the RS when there are no cached results at the
+ * time next is called, just like the old ResultScanner implementation. Mainly used for writing
+ * UTs, so that we can control when requests are sent to the RS. The default ResultScanner
+ * implementation fetches in the background.
+ */
+@InterfaceAudience.Private
+public class ScanPerNextResultScanner implements ResultScanner, AdvancedScanResultConsumer {
+
+  private final AsyncTable<AdvancedScanResultConsumer> table;
+
+  private final Scan scan;
+
+  private final Queue<Result> queue = new ArrayDeque<>();
+
+  private ScanMetrics scanMetrics;
+
+  private boolean closed = false;
+
+  private Throwable error;
+
+  private ScanResumer resumer;
+
+  public ScanPerNextResultScanner(AsyncTable<AdvancedScanResultConsumer> table, Scan scan) {
+    this.table = table;
+    this.scan = scan;
+  }
+
+  @Override
+  public synchronized void onError(Throwable error) {
+    this.error = error;
+    notifyAll();
+  }
+
+  @Override
+  public synchronized void onComplete() {
+    closed = true;
+    notifyAll();
+  }
+
+  @Override
+  public void onScanMetricsCreated(ScanMetrics scanMetrics) {
+    this.scanMetrics = scanMetrics;
+  }
+
+  @Override
+  public synchronized void onNext(Result[] results, ScanController controller) {
+    assert results.length > 0;
+    if (closed) {
+      controller.terminate();
+      return;
+    }
+    for (Result result : results) {
+      queue.add(result);
+    }
+    notifyAll();
+    resumer = controller.suspend();
+  }
+
+  @Override
+  public synchronized void onHeartbeat(ScanController controller) {
+    if (closed) {
+      controller.terminate();
+      return;
+    }
+    if (scan.isNeedCursorResult()) {
+      controller.cursor().ifPresent(c -> queue.add(Result.createCursorResult(c)));
+    }
+  }
+
+  @Override
+  public synchronized Result next() throws IOException {
+    if (queue.isEmpty()) {
+      if (resumer != null) {
+        resumer.resume();
+        resumer = null;
+      } else {
+        table.scan(scan, this);
+      }
+    }
+    while (queue.isEmpty()) {
+      if (closed) {
+        return null;
+      }
+      if (error != null) {
+        Throwables.propagateIfPossible(error, IOException.class);
+        throw new IOException(error);
+      }
+      try {
+        wait();
+      } catch (InterruptedException e) {
+        throw new InterruptedIOException();
+      }
+    }
+    return queue.poll();
+  }
+
+  @Override
+  public synchronized void close() {
+    closed = true;
+    queue.clear();
+    if (resumer != null) {
+      resumer.resume();
+      resumer = null;
+    }
+    notifyAll();
+  }
+
+  @Override
+  public boolean renewLease() {
+    // The renew lease operation will be handled in the background
+    return false;
+  }
+
+  @Override
+  public ScanMetrics getScanMetrics() {
+    return scanMetrics;
+  }
+}
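
A usage sketch for the scanner above (it lives under src/test, so it is only visible to test code); the table name is a placeholder and the connection setup mirrors what TestScannerHeartbeatMessages does below:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.AsyncTable;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.ScanPerNextResultScanner;

    public final class ScanPerNextUsage {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
          AsyncTable<AdvancedScanResultConsumer> table =
              conn.getTable(TableName.valueOf("demo")); // placeholder table name
          try (ScanPerNextResultScanner scanner =
              new ScanPerNextResultScanner(table, new Scan())) {
            for (Result r = scanner.next(); r != null; r = scanner.next()) {
              System.out.println(r); // each next() call is what drives the scan RPCs
            }
          }
        }
      }
    }
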
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
index ea9f7e7..7a21941 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
@@ -39,11 +39,16 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTestConst;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.AsyncTable;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ScanPerNextResultScanner;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -58,10 +63,10 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
@@ -75,11 +80,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRespon
  * the client when the server has exceeded the time limit during the processing of the scan. When
  * the time limit is reached, the server will return to the Client whatever Results it has
  * accumulated (potentially empty).
- * <p/>
- * TODO: with async client based sync client, we will fetch result in background which makes this
- * test broken. We need to find another way to implement the test.
  */
-@Ignore
 @Category(MediumTests.class)
 public class TestScannerHeartbeatMessages {
 
@@ -89,7 +90,7 @@ public class TestScannerHeartbeatMessages {
 
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
-  private static Table TABLE = null;
+  private static AsyncConnection CONN;
 
   /**
    * Table configuration
@@ -141,16 +142,19 @@ public class TestScannerHeartbeatMessages {
     conf.setLong(StoreScanner.HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK, 1);
     TEST_UTIL.startMiniCluster(1);
 
-    TABLE = createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE);
+    createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE);
+
+    Configuration newConf = new Configuration(conf);
+    newConf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CLIENT_TIMEOUT);
+    newConf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, CLIENT_TIMEOUT);
+    CONN = ConnectionFactory.createAsyncConnection(newConf).get();
   }
 
-  static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
-      byte[][] qualifiers, byte[] cellValue) throws IOException {
+  static void createTestTable(TableName name, byte[][] rows, byte[][] families, byte[][] qualifiers,
+      byte[] cellValue) throws IOException {
     Table ht = TEST_UTIL.createTable(name, families);
     List<Put> puts = createPuts(rows, families, qualifiers, cellValue);
     ht.put(puts);
-    ht.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CLIENT_TIMEOUT);
-    return ht;
   }
 
   /**
@@ -177,6 +181,7 @@ public class TestScannerHeartbeatMessages {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
+    Closeables.close(CONN, true);
     TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -311,26 +316,28 @@ public class TestScannerHeartbeatMessages {
         scan.setMaxResultSize(Long.MAX_VALUE);
         scan.setCaching(Integer.MAX_VALUE);
         scan.setFilter(new SparseCellFilter());
-        ResultScanner scanner = TABLE.getScanner(scan);
-        int num = 0;
-        while (scanner.next() != null) {
-          num++;
+        try (ScanPerNextResultScanner scanner =
+          new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) {
+          int num = 0;
+          while (scanner.next() != null) {
+            num++;
+          }
+          assertEquals(1, num);
         }
-        assertEquals(1, num);
-        scanner.close();
 
         scan = new Scan();
         scan.setMaxResultSize(Long.MAX_VALUE);
         scan.setCaching(Integer.MAX_VALUE);
         scan.setFilter(new SparseCellFilter());
         scan.setAllowPartialResults(true);
-        scanner = TABLE.getScanner(scan);
-        num = 0;
-        while (scanner.next() != null) {
-          num++;
+        try (ScanPerNextResultScanner scanner =
+          new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) {
+          int num = 0;
+          while (scanner.next() != null) {
+            num++;
+          }
+          assertEquals(NUM_FAMILIES * NUM_QUALIFIERS, num);
         }
-        assertEquals(NUM_FAMILIES * NUM_QUALIFIERS, num);
-        scanner.close();
 
         return null;
       }
@@ -349,13 +356,14 @@ public class TestScannerHeartbeatMessages {
         scan.setMaxResultSize(Long.MAX_VALUE);
         scan.setCaching(Integer.MAX_VALUE);
         scan.setFilter(new SparseRowFilter());
-        ResultScanner scanner = TABLE.getScanner(scan);
-        int num = 0;
-        while (scanner.next() != null) {
-          num++;
+        try (ScanPerNextResultScanner scanner =
+          new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) {
+          int num = 0;
+          while (scanner.next() != null) {
+            num++;
+          }
+          assertEquals(1, num);
         }
-        assertEquals(1, num);
-        scanner.close();
 
         return null;
       }
@@ -374,8 +382,9 @@ public class TestScannerHeartbeatMessages {
   private void testEquivalenceOfScanWithHeartbeats(final Scan scan, int rowSleepTime,
       int cfSleepTime, boolean sleepBeforeCf) throws Exception {
     disableSleeping();
-    final ResultScanner scanner = TABLE.getScanner(scan);
-    final ResultScanner scannerWithHeartbeats = TABLE.getScanner(scan);
+    AsyncTable<AdvancedScanResultConsumer> table = CONN.getTable(TABLE_NAME);
+    final ResultScanner scanner = new ScanPerNextResultScanner(table, scan);
+    final ResultScanner scannerWithHeartbeats = new ScanPerNextResultScanner(table, scan);
 
     Result r1 = null;
     Result r2 = null;


[hbase] 11/27: HBASE-21585 Remove ClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 51148aa2e34f3e902e5880d4d7a0444202b1a5a6
Author: zhangduo <zh...@apache.org>
AuthorDate: Mon Feb 11 20:32:21 2019 +0800

    HBASE-21585 Remove ClusterConnection
---
 .../hadoop/hbase/client/BufferedMutatorImpl.java   |   5 +-
 .../client/CancellableRegionServerCallable.java    |   6 +-
 .../hadoop/hbase/client/ClientServiceCallable.java |  12 +-
 .../hadoop/hbase/client/ClusterConnection.java     | 181 ---------------------
 .../hadoop/hbase/client/ConnectionFactory.java     |   2 +-
 .../hbase/client/ConnectionImplementation.java     | 140 ++++++++++++----
 .../hadoop/hbase/client/ConnectionUtils.java       |  12 +-
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |  33 ++--
 .../org/apache/hadoop/hbase/client/HBaseHbck.java  |  17 +-
 .../org/apache/hadoop/hbase/client/HTable.java     |  27 ++-
 .../java/org/apache/hadoop/hbase/client/Hbck.java  |   4 +-
 .../apache/hadoop/hbase/client/MasterCallable.java |   7 +-
 .../hadoop/hbase/client/MultiServerCallable.java   |  12 +-
 .../hbase/client/NoncedRegionServerCallable.java   |   4 +-
 .../hbase/client/RegionCoprocessorRpcChannel.java  |   4 +-
 .../hadoop/hbase/client/RegionServerCallable.java  |  12 +-
 .../hadoop/hbase/client/ReversedClientScanner.java |   7 -
 .../hbase/client/ReversedScannerCallable.java      |   8 +-
 .../hadoop/hbase/client/ScannerCallable.java       |  11 +-
 .../hbase/client/ScannerCallableWithReplicas.java  |   2 +-
 .../hadoop/hbase/client/TestAsyncProcess.java      |   7 +-
 .../hadoop/hbase/client/TestBufferedMutator.java   |   3 +-
 .../hadoop/hbase/DistributedHBaseCluster.java      |  27 +--
 .../mapreduce/TestMultiTableInputFormatBase.java   |   4 +-
 .../hbase/mapreduce/TestTableInputFormatBase.java  |  10 +-
 .../main/java/org/apache/hadoop/hbase/Server.java  |   9 -
 .../hbase/backup/example/ZKTableArchiveClient.java |  11 +-
 .../hbase/client/AsyncClusterConnection.java       |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |  10 +-
 .../master/assignment/AssignmentManagerUtil.java   |  20 +--
 .../quotas/RegionServerSpaceQuotaManager.java      |   2 +-
 .../DisableTableViolationPolicyEnforcement.java    |   3 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  39 ++---
 .../regionserver/DumpReplicationQueues.java        |   4 +-
 .../regionserver/ReplicationSyncUp.java            |   8 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |  39 ++---
 .../apache/hadoop/hbase/util/HBaseFsckRepair.java  |   1 -
 .../apache/hadoop/hbase/util/MultiHConnection.java |  15 +-
 .../apache/hadoop/hbase/util/RegionSplitter.java   |  15 +-
 .../main/resources/hbase-webapps/master/table.jsp  |   2 +-
 .../java/org/apache/hadoop/hbase/HBaseCluster.java |  23 ---
 .../org/apache/hadoop/hbase/MiniHBaseCluster.java  |  24 ---
 .../hadoop/hbase/MockRegionServerServices.java     |  10 +-
 .../example/TestZooKeeperTableArchiveClient.java   |   6 +-
 .../hbase/client/HConnectionTestingUtility.java    |  19 +--
 .../org/apache/hadoop/hbase/client/TestAdmin1.java |  29 ++--
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |   2 +-
 .../hbase/client/TestAsyncTableAdminApi.java       |   2 +-
 .../apache/hadoop/hbase/client/TestCISleep.java    |  12 +-
 .../hbase/client/TestConnectionImplementation.java |   5 +-
 .../hadoop/hbase/client/TestFromClientSide3.java   |   6 +-
 .../client/TestMetaTableAccessorNoCluster.java     |  10 +-
 .../hadoop/hbase/client/TestMultiParallel.java     |   5 +-
 .../hadoop/hbase/client/TestReplicasClient.java    |  22 ++-
 .../hbase/client/TestSeparateClientZKCluster.java  |   2 +-
 .../hbase/client/TestShortCircuitConnection.java   |   2 +-
 .../hbase/master/MockNoopMasterServices.java       |   8 +-
 .../hadoop/hbase/master/MockRegionServer.java      |   8 +-
 .../hbase/master/TestActiveMasterManager.java      |   9 +-
 .../hbase/master/TestClockSkewDetection.java       |  13 +-
 .../hadoop/hbase/master/TestMasterNoCluster.java   |  13 +-
 .../master/assignment/MockMasterServices.java      |   6 +-
 .../hbase/master/cleaner/TestHFileCleaner.java     |   9 +-
 .../hbase/master/cleaner/TestHFileLinkCleaner.java |  10 +-
 .../hbase/master/cleaner/TestLogsCleaner.java      |   8 +-
 .../cleaner/TestReplicationHFileCleaner.java       |   9 +-
 .../procedure/MasterProcedureTestingUtility.java   |   4 +-
 .../regionserver/TestHRegionServerBulkLoad.java    |  35 +---
 .../hbase/regionserver/TestHeapMemoryManager.java  |   9 +-
 .../hbase/regionserver/TestSplitLogWorker.java     |   9 +-
 .../hadoop/hbase/regionserver/TestWALLockup.java   |   8 +-
 .../replication/TestReplicationTrackerZKImpl.java  |   9 +-
 .../regionserver/TestReplicationSourceManager.java |  10 +-
 .../security/token/TestTokenAuthentication.java    |   9 +-
 .../hadoop/hbase/util/BaseTestHBaseFsck.java       |  13 +-
 .../org/apache/hadoop/hbase/util/MockServer.java   |  10 +-
 .../hadoop/hbase/util/MultiThreadedAction.java     |   6 +-
 .../apache/hadoop/hbase/util/TestHBaseFsckMOB.java |   3 +-
 .../hbase/thrift2/client/ThriftConnection.java     |   2 -
 .../hadoop/hbase/thrift2/TestThriftConnection.java |   4 +-
 80 files changed, 375 insertions(+), 765 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index 922611b..6d70219 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -63,7 +63,7 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class BufferedMutatorImpl implements BufferedMutator {
+class BufferedMutatorImpl implements BufferedMutator {
 
   private static final Logger LOG = LoggerFactory.getLogger(BufferedMutatorImpl.class);
 
@@ -95,7 +95,8 @@ public class BufferedMutatorImpl implements BufferedMutator {
   private final AsyncProcess ap;
 
   @VisibleForTesting
-  BufferedMutatorImpl(ClusterConnection conn, BufferedMutatorParams params, AsyncProcess ap) {
+  BufferedMutatorImpl(ConnectionImplementation conn, BufferedMutatorParams params,
+      AsyncProcess ap) {
     if (conn == null || conn.isClosed()) {
       throw new IllegalArgumentException("Connection is null or closed.");
     }
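
Since BufferedMutatorImpl is no longer public, callers outside the client package are expected to obtain mutators through the public factory method on Connection rather than the implementation class. A minimal sketch, assuming a hypothetical table "t1" with family "cf":

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        // getBufferedMutator is the public entry point; the implementation class is an
        // internal detail now that BufferedMutatorImpl is package-private.
        BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
      mutator.mutate(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      mutator.flush(); // push the buffered edit out before the mutator closes
    }
  }
}
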
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java
index 6ad9254..f81018e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java
@@ -39,8 +39,10 @@ abstract class CancellableRegionServerCallable<T> extends ClientServiceCallable<
     Cancellable {
   private final RetryingTimeTracker tracker;
   private final int rpcTimeout;
-  CancellableRegionServerCallable(Connection connection, TableName tableName, byte[] row,
-      RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, int priority) {
+
+  CancellableRegionServerCallable(ConnectionImplementation connection, TableName tableName,
+      byte[] row, RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker,
+      int priority) {
     super(connection, tableName, row, rpcController, priority);
     this.rpcTimeout = rpcTimeout;
     this.tracker = tracker;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
index 67ba838..c7006a8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
@@ -31,12 +31,12 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
  * @param <T>
  */
 @InterfaceAudience.Private
-public abstract class ClientServiceCallable<T> extends
-    RegionServerCallable<T, ClientProtos.ClientService.BlockingInterface> {
+public abstract class ClientServiceCallable<T>
+    extends RegionServerCallable<T, ClientProtos.ClientService.BlockingInterface> {
 
-  public ClientServiceCallable(Connection connection, TableName tableName, byte[] row,
+  public ClientServiceCallable(ConnectionImplementation connection, TableName tableName, byte[] row,
       RpcController rpcController, int priority) {
-    super((ConnectionImplementation) connection, tableName, row, rpcController, priority);
+    super(connection, tableName, row, rpcController, priority);
   }
 
   @Override
@@ -46,12 +46,12 @@ public abstract class ClientServiceCallable<T> extends
 
   // Below here are simple methods that contain the stub and the rpcController.
   protected ClientProtos.GetResponse doGet(ClientProtos.GetRequest request)
-  throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
+      throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
     return getStub().get(getRpcController(), request);
   }
 
   protected ClientProtos.MutateResponse doMutate(ClientProtos.MutateRequest request)
-  throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
+      throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
     return getStub().mutate(getRpcController(), request);
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
deleted file mode 100644
index 092bd24..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.yetus.audience.InterfaceAudience;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-
-/** Internal methods on Connection that should not be used by user code. */
-@InterfaceAudience.Private
-// NOTE: Although this class is public, this class is meant to be used directly from internal
-// classes and unit tests only.
-public interface ClusterConnection extends Connection {
-
-  /**
-   * Key for configuration in Configuration whose value is the class we implement making a
-   * new Connection instance.
-   */
-  String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";
-
-  /**
-   * @return - true if the master server is running
-   * @deprecated this has been deprecated without a replacement
-   */
-  @Deprecated
-  boolean isMasterRunning()
-      throws MasterNotRunningException, ZooKeeperConnectionException;
-
-  /**
-   * Use this api to check if the table has been created with the specified number of
-   * splitkeys which was used while creating the given table.
-   * Note : If this api is used after a table's region gets splitted, the api may return
-   * false.
-   * @param tableName
-   *          tableName
-   * @param splitKeys
-   *          splitKeys used while creating table
-   * @throws IOException
-   *           if a remote or network exception occurs
-   */
-  boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws
-      IOException;
-
-  /**
-   * A table that isTableEnabled == false and isTableDisabled == false
-   * is possible. This happens when a table has a lot of regions
-   * that must be processed.
-   * @param tableName table name
-   * @return true if the table is enabled, false otherwise
-   * @throws IOException if a remote or network exception occurs
-   */
-  boolean isTableEnabled(TableName tableName) throws IOException;
-
-  /**
-   * @param tableName table name
-   * @return true if the table is disabled, false otherwise
-   * @throws IOException if a remote or network exception occurs
-   */
-  boolean isTableDisabled(TableName tableName) throws IOException;
-
-  /**
-   * Retrieve TableState, represent current table state.
-   * @param tableName table state for
-   * @return state of the table
-   */
-  TableState getTableState(TableName tableName)  throws IOException;
-
-  /**
-   * Returns a {@link MasterKeepAliveConnection} to the active master
-   */
-  MasterKeepAliveConnection getMaster() throws IOException;
-
-  /**
-   * Get the admin service for master.
-   */
-  AdminService.BlockingInterface getAdminForMaster() throws IOException;
-
-  /**
-   * Establishes a connection to the region server at the specified address.
-   * @param serverName the region server to connect to
-   * @return proxy for HRegionServer
-   * @throws IOException if a remote or network exception occurs
-   */
-  AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException;
-
-  /**
-   * Establishes a connection to the region server at the specified address, and returns
-   * a region client protocol.
-   *
-   * @param serverName the region server to connect to
-   * @return ClientProtocol proxy for RegionServer
-   * @throws IOException if a remote or network exception occurs
-   *
-   */
-  ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException;
-
-  /**
-   * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration.
-   */
-  NonceGenerator getNonceGenerator();
-
-  /**
-   * @return Default AsyncProcess associated with this connection.
-   */
-  AsyncProcess getAsyncProcess();
-
-  /**
-   * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}.
-   * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be
-   * intercepted with the configured {@link RetryingCallerInterceptor}
-   * @param conf configuration
-   * @return RpcRetryingCallerFactory
-   */
-  RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf);
-
-  /**
-   * @return Connection's RpcRetryingCallerFactory instance
-   */
-  RpcRetryingCallerFactory getRpcRetryingCallerFactory();
-
-  /**
-   * @return Connection's RpcControllerFactory instance
-   */
-  RpcControllerFactory getRpcControllerFactory();
-
-  /**
-   * @return a ConnectionConfiguration object holding parsed configuration values
-   */
-  ConnectionConfiguration getConnectionConfiguration();
-
-  /**
-   * @return the current statistics tracker associated with this connection
-   */
-  ServerStatisticTracker getStatisticsTracker();
-
-  /**
-   * @return the configured client backoff policy
-   */
-  ClientBackoffPolicy getBackoffPolicy();
-
-  /**
-   * @return the MetricsConnection instance associated with this connection.
-   */
-  MetricsConnection getConnectionMetrics();
-
-  /**
-   * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
-   *         supports cell blocks.
-   */
-  boolean hasCellBlockSupport();
-
-  /**
-   * @return the number of region servers that are currently running
-   * @throws IOException if a remote or network exception occurs
-   */
-  int getCurrentNrHRS() throws IOException;
-}
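
Removing the ClusterConnection interface means callers can no longer cast a Connection to reach internal methods such as getAdmin(ServerName) or getAsyncProcess(); those now live only on the package-private ConnectionImplementation or behind the public Admin API. Code that stays on the public surface is unaffected, as in this minimal sketch (table name hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class PublicSurfaceClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin();
        Table table = conn.getTable(TableName.valueOf("t1"))) {
      // Only public Connection/Admin/Table methods are used, so this code compiles
      // the same before and after the ClusterConnection interface is removed.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
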
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index b984a99..ceef356 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -212,7 +212,7 @@ public class ConnectionFactory {
    */
   public static Connection createConnection(Configuration conf, ExecutorService pool,
     final User user) throws IOException {
-    String className = conf.get(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
+    String className = conf.get(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
       ConnectionImplementation.class.getName());
     Class<?> clazz;
     try {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 49fa81b..de377c7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -152,7 +152,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Updat
     value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
     justification="Access to the conncurrent hash map is under a lock so should be fine.")
 @InterfaceAudience.Private
-class ConnectionImplementation implements ClusterConnection, Closeable {
+class ConnectionImplementation implements Connection, Closeable {
   public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server";
   private static final Logger LOG = LoggerFactory.getLogger(ConnectionImplementation.class);
 
@@ -354,9 +354,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    */
   @VisibleForTesting
   static NonceGenerator injectNonceGeneratorForTesting(
-      ClusterConnection conn, NonceGenerator cnm) {
-    ConnectionImplementation connImpl = (ConnectionImplementation)conn;
-    NonceGenerator ng = connImpl.getNonceGenerator();
+      ConnectionImplementation conn, NonceGenerator cnm) {
+    NonceGenerator ng = conn.getNonceGenerator();
     LOG.warn("Nonce generator is being replaced by test code for "
       + cnm.getClass().getName());
     nonceGenerator = cnm;
@@ -456,7 +455,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       }), rpcControllerFactory);
   }
 
-  @Override
+  /**
+   * @return the MetricsConnection instance associated with this connection.
+   */
   public MetricsConnection getConnectionMetrics() {
     return this.metrics;
   }
@@ -600,7 +601,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    * @deprecated this has been deprecated without a replacement
    */
   @Deprecated
-  @Override
   public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException {
     // When getting the master connection, we check it's running,
     // so if there is no exception, it means we've been able to get a
@@ -628,18 +628,39 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row);
   }
 
-
-  @Override
+  /**
+   * A table that isTableEnabled == false and isTableDisabled == false
+   * is possible. This happens when a table has a lot of regions
+   * that must be processed.
+   * @param tableName table name
+   * @return true if the table is enabled, false otherwise
+   * @throws IOException if a remote or network exception occurs
+   */
   public boolean isTableEnabled(TableName tableName) throws IOException {
     return getTableState(tableName).inStates(TableState.State.ENABLED);
   }
 
-  @Override
+  /**
+   * @param tableName table name
+   * @return true if the table is disabled, false otherwise
+   * @throws IOException if a remote or network exception occurs
+   */
   public boolean isTableDisabled(TableName tableName) throws IOException {
     return getTableState(tableName).inStates(TableState.State.DISABLED);
   }
 
-  @Override
+  /**
+   * Use this API to check if the table has been created with the specified number of
+   * split keys which were used while creating the given table.
+   * Note: if this API is used after a table's region gets split, it may return
+   * false.
+   * @param tableName
+   *          the table name
+   * @param splitKeys
+   *          the split keys used while creating the table
+   * @throws IOException
+   *           if a remote or network exception occurs
+   */
   public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys)
       throws IOException {
     checkClosed();
@@ -809,15 +830,14 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   /**
-  *
-  * @param tableName table to get regions of
-  * @param row the row
-  * @param useCache Should we use the cache to retrieve the region information.
-  * @param retry do we retry
-  * @param replicaId the replicaId for the region
-  * @return region locations for this row.
-  * @throws IOException if IO failure occurs
-  */
+   * @param tableName table to get regions of
+   * @param row the row
+   * @param useCache Should we use the cache to retrieve the region information.
+   * @param retry do we retry
+   * @param replicaId the replicaId for the region
+   * @return region locations for this row.
+   * @throws IOException if IO failure occurs
+   */
   RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
       boolean retry, int replicaId) throws IOException {
     checkClosed();
@@ -1048,6 +1068,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     metaCache.clearCache(serverName);
   }
 
+
+  /**
+   * Allows flushing the region cache.
+   */
   @Override
   public void clearRegionLocationCache() {
     metaCache.clearCache();
@@ -1258,12 +1282,19 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }
   }
 
-  @Override
+  /**
+   * Get the admin service for master.
+   */
   public AdminProtos.AdminService.BlockingInterface getAdminForMaster() throws IOException {
     return getAdmin(get(registry.getMasterAddress()));
   }
 
-  @Override
+  /**
+   * Establishes a connection to the region server at the specified address.
+   * @param serverName the region server to connect to
+   * @return proxy for HRegionServer
+   * @throws IOException if a remote or network exception occurs
+   */
   public AdminProtos.AdminService.BlockingInterface getAdmin(ServerName serverName)
       throws IOException {
     checkClosed();
@@ -1279,7 +1310,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     });
   }
 
-  @Override
+  /**
+   * Establishes a connection to the region server at the specified address, and returns a region
+   * client protocol.
+   * @param serverName the region server to connect to
+   * @return ClientProtocol proxy for RegionServer
+   * @throws IOException if a remote or network exception occurs
+   */
   public BlockingInterface getClient(ServerName serverName) throws IOException {
     checkClosed();
     if (isDeadServer(serverName)) {
@@ -1289,14 +1326,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       serverName, this.hostnamesCanChange);
     return (ClientProtos.ClientService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> {
       BlockingRpcChannel channel =
-          this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout);
+        this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout);
       return ClientProtos.ClientService.newBlockingStub(channel);
     });
   }
 
   final MasterServiceState masterServiceState = new MasterServiceState(this);
 
-  @Override
   public MasterKeepAliveConnection getMaster() throws IOException {
     return getKeepAliveMasterService();
   }
@@ -1927,6 +1963,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     cacheLocation(hri.getTable(), source, newHrl);
   }
 
+  /**
+   * Deletes cached locations for the specific region.
+   * @param location The location object for the region, to be purged from cache.
+   */
   void deleteCachedRegionLocation(final HRegionLocation location) {
     metaCache.clearCache(location);
   }
@@ -2005,17 +2045,23 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     metaCache.clearCache(regionInfo);
   }
 
-  @Override
+  /**
+   * @return Default AsyncProcess associated with this connection.
+   */
   public AsyncProcess getAsyncProcess() {
     return asyncProcess;
   }
 
-  @Override
+  /**
+   * @return the current statistics tracker associated with this connection
+   */
   public ServerStatisticTracker getStatisticsTracker() {
     return this.stats;
   }
 
-  @Override
+  /**
+   * @return the configured client backoff policy
+   */
   public ClientBackoffPolicy getBackoffPolicy() {
     return this.backoffPolicy;
   }
@@ -2051,7 +2097,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return this.aborted;
   }
 
-  @Override
+  /**
+   * @return the number of region servers that are currently running
+   * @throws IOException if a remote or network exception occurs
+   */
   public int getCurrentNrHRS() throws IOException {
     return get(this.registry.getCurrentNrHRS());
   }
@@ -2094,12 +2143,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     close();
   }
 
-  @Override
+  /**
+   * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration.
+   */
   public NonceGenerator getNonceGenerator() {
     return nonceGenerator;
   }
 
-  @Override
+  /**
+   * Retrieve TableState, represent current table state.
+   * @param tableName table state for
+   * @return state of the table
+   */
   public TableState getTableState(TableName tableName) throws IOException {
     checkClosed();
     TableState tableState = MetaTableAccessor.getTableState(this, tableName);
@@ -2109,28 +2164,43 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return tableState;
   }
 
-  @Override
+  /**
+   * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}.
+   * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be
+   * intercepted with the configured {@link RetryingCallerInterceptor}
+   * @param conf configuration
+   * @return RpcRetryingCallerFactory
+   */
   public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) {
     return RpcRetryingCallerFactory
         .instantiate(conf, this.interceptor, this.getStatisticsTracker());
   }
 
-  @Override
+  /**
+   * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
+   *         supports cell blocks.
+   */
   public boolean hasCellBlockSupport() {
     return this.rpcClient.hasCellBlockSupport();
   }
 
-  @Override
+  /**
+   * @return a ConnectionConfiguration object holding parsed configuration values
+   */
   public ConnectionConfiguration getConnectionConfiguration() {
     return this.connectionConfig;
   }
 
-  @Override
+  /**
+   * @return Connection's RpcRetryingCallerFactory instance
+   */
   public RpcRetryingCallerFactory getRpcRetryingCallerFactory() {
     return this.rpcCallerFactory;
   }
 
-  @Override
+  /**
+   * @return Connection's RpcControllerFactory instance
+   */
   public RpcControllerFactory getRpcControllerFactory() {
     return this.rpcControllerFactory;
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 4ec7e32..fe1dd3e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -80,6 +80,12 @@ public final class ConnectionUtils {
 
   private static final Logger LOG = LoggerFactory.getLogger(ConnectionUtils.class);
 
+  /**
+   * Key for configuration in Configuration whose value is the class we implement making a new
+   * Connection instance.
+   */
+  public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";
+
   private ConnectionUtils() {
   }
 
@@ -109,7 +115,7 @@ public final class ConnectionUtils {
    * @param cnm Replaces the nonce generator used, for testing.
    * @return old nonce generator.
    */
-  public static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn,
+  public static NonceGenerator injectNonceGeneratorForTesting(ConnectionImplementation conn,
       NonceGenerator cnm) {
     return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm);
   }
@@ -186,7 +192,7 @@ public final class ConnectionUtils {
    * @return an short-circuit connection.
    * @throws IOException if IO failure occurred
    */
-  public static ClusterConnection createShortCircuitConnection(final Configuration conf,
+  public static ConnectionImplementation createShortCircuitConnection(final Configuration conf,
       ExecutorService pool, User user, final ServerName serverName,
       final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
       throws IOException {
@@ -202,7 +208,7 @@ public final class ConnectionUtils {
    */
   @VisibleForTesting
   public static void setupMasterlessConnection(Configuration conf) {
-    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
+    conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
   }
 
   /**
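
The configuration key's string value ("hbase.client.connection.impl") is unchanged; only the constant's home moves from the removed interface to ConnectionUtils. A minimal sketch of how a test might plug in a custom Connection class, where MyTestConnection is a hypothetical implementation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.ConnectionUtils;

public class CustomConnectionConfig {
  static Connection open() throws java.io.IOException {
    Configuration conf = HBaseConfiguration.create();
    // Same key string as before the move; callers only change which class carries the constant.
    conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MyTestConnection.class.getName());
    // ConnectionFactory reflectively instantiates whatever class the key names, so the
    // returned Connection is a MyTestConnection here (hypothetical test double).
    return ConnectionFactory.createConnection(conf);
  }
}
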
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index f553960..55b83ee 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -446,7 +446,7 @@ public class HBaseAdmin implements Admin {
 
   /** @return Connection used by this object. */
   @Override
-  public Connection getConnection() {
+  public ConnectionImplementation getConnection() {
     return connection;
   }
 
@@ -485,23 +485,24 @@ public class HBaseAdmin implements Admin {
     });
   }
 
-  static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
-      RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
-      int operationTimeout, int rpcTimeout) throws IOException {
+  static TableDescriptor getTableDescriptor(final TableName tableName,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory,
+      final RpcControllerFactory rpcControllerFactory, int operationTimeout, int rpcTimeout)
+      throws IOException {
     if (tableName == null) return null;
     TableDescriptor td =
-        executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
-      @Override
-      protected TableDescriptor rpcCall() throws Exception {
-        GetTableDescriptorsRequest req =
+      executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
+        @Override
+        protected TableDescriptor rpcCall() throws Exception {
+          GetTableDescriptorsRequest req =
             RequestConverter.buildGetTableDescriptorsRequest(tableName);
-        GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
-        if (!htds.getTableSchemaList().isEmpty()) {
-          return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
+          GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
+          if (!htds.getTableSchemaList().isEmpty()) {
+            return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
+          }
+          return null;
         }
-        return null;
-      }
-    }, rpcCallerFactory, operationTimeout, rpcTimeout);
+      }, rpcCallerFactory, operationTimeout, rpcTimeout);
     if (td != null) {
       return td;
     }
@@ -2027,8 +2028,8 @@ public class HBaseAdmin implements Admin {
 
     // Check ZK first.
     // If the connection exists, we may have a connection to ZK that does not work anymore
-    try (ClusterConnection connection =
-      (ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) {
+    try (ConnectionImplementation connection =
+      (ConnectionImplementation) ConnectionFactory.createConnection(copyOfConf)) {
       // can throw MasterNotRunningException
       connection.isMasterRunning();
     }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java
index 79a75d0..c77a736 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java
@@ -22,10 +22,15 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
@@ -33,16 +38,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService.BlockingInterface;
 
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-
-import org.apache.yetus.audience.InterfaceAudience;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 
 /**
- * Use {@link ClusterConnection#getHbck()} to obtain an instance of {@link Hbck} instead of
+ * Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of
  * constructing an HBaseHbck directly.
  *
  * <p>Connection should be an <i>unmanaged</i> connection obtained via
@@ -57,7 +55,6 @@ import org.slf4j.LoggerFactory;
  * by each thread. Pooling or caching of the instance is not recommended.</p>
  *
  * @see ConnectionFactory
- * @see ClusterConnection
  * @see Hbck
  */
 @InterfaceAudience.Private
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 4300f6e..ee6247b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -153,7 +153,6 @@ public class HTable implements Table {
    * @param rpcControllerFactory The RPC controller factory
    * @param pool ExecutorService to be used.
    */
-  @InterfaceAudience.Private
   protected HTable(final ConnectionImplementation connection,
       final TableBuilderBase builder,
       final RpcRetryingCallerFactory rpcCallerFactory,
@@ -449,22 +448,18 @@ public class HTable implements Table {
   }
 
   public static <R> void doBatchWithCallback(List<? extends Row> actions, Object[] results,
-    Callback<R> callback, ClusterConnection connection, ExecutorService pool, TableName tableName)
-    throws InterruptedIOException, RetriesExhaustedWithDetailsException {
-    int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout();
+      Callback<R> callback, Connection connection, ExecutorService pool, TableName tableName)
+      throws InterruptedIOException, RetriesExhaustedWithDetailsException {
+    ConnectionImplementation connImpl = (ConnectionImplementation) connection;
+    int operationTimeout = connImpl.getConnectionConfiguration().getOperationTimeout();
     int writeTimeout = connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
-        connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-            HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
-    AsyncProcessTask<R> task = AsyncProcessTask.newBuilder(callback)
-            .setPool(pool)
-            .setTableName(tableName)
-            .setRowAccess(actions)
-            .setResults(results)
-            .setOperationTimeout(operationTimeout)
-            .setRpcTimeout(writeTimeout)
-            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
-            .build();
-    AsyncRequestFuture ars = connection.getAsyncProcess().submit(task);
+      connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+        HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+    AsyncProcessTask<R> task =
+      AsyncProcessTask.newBuilder(callback).setPool(pool).setTableName(tableName)
+        .setRowAccess(actions).setResults(results).setOperationTimeout(operationTimeout)
+        .setRpcTimeout(writeTimeout).setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build();
+    AsyncRequestFuture ars = connImpl.getAsyncProcess().submit(task);
     ars.waitUntilDone();
     if (ars.hasError()) {
       throw ars.getErrors();
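
doBatchWithCallback now accepts the public Connection type and performs the ConnectionImplementation cast internally, so call sites no longer cast themselves. A rough sketch of a caller, assuming hypothetical table/family names and a connection created by ConnectionFactory (anything else would fail the internal cast):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchWithCallbackExample {
  static void runBatch(Connection connection) throws Exception {
    List<? extends Row> actions = Arrays.asList(
        new Put(Bytes.toBytes("r1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("1")));
    Object[] results = new Object[actions.size()];
    Batch.Callback<Object> callback = (region, row, result) -> { /* per-row hook */ };
    ExecutorService pool = Executors.newFixedThreadPool(2);
    try {
      // The parameter is the public Connection type, but at runtime it must be the
      // default ConnectionImplementation since the helper casts it internally.
      HTable.doBatchWithCallback(actions, results, callback, connection, pool,
          TableName.valueOf("t1"));
    } finally {
      pool.shutdown();
    }
  }
}
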
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java
index 76643e6..249cd87 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java
@@ -21,7 +21,6 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
-
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
@@ -31,7 +30,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 
 /**
- * Hbck fixup tool APIs. Obtain an instance from {@link ClusterConnection#getHbck()} and call
+ * Hbck fixup tool APIs. Obtain an instance from {@link Connection#getHbck()} and call
  * {@link #close()} when done.
  * <p>WARNING: the below methods can damage the cluster. It may leave the cluster in an
  * indeterminate state, e.g. region not assigned, or some hdfs files left behind. After running
@@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
  * procedures to get regions back online. DO AT YOUR OWN RISK. For experienced users only.
  *
  * @see ConnectionFactory
- * @see ClusterConnection
  * @since 2.0.2, 2.1.1
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
index 7ae9731..04da2eb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
@@ -43,12 +43,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  */
 @InterfaceAudience.Private
 abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
-  protected final ClusterConnection connection;
+  protected final ConnectionImplementation connection;
   protected MasterKeepAliveConnection master;
   private final HBaseRpcController rpcController;
 
-  MasterCallable(final Connection connection, final RpcControllerFactory rpcConnectionFactory) {
-    this.connection = (ClusterConnection) connection;
+  MasterCallable(ConnectionImplementation connection,
+      final RpcControllerFactory rpcConnectionFactory) {
+    this.connection = connection;
     this.rpcController = rpcConnectionFactory.newController();
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index bfc161e..bf557fa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -31,15 +30,16 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
 /**
  * Callable that handles the <code>multi</code> method call going against a single
@@ -52,7 +52,7 @@ class MultiServerCallable extends CancellableRegionServerCallable<MultiResponse>
   private MultiAction multiAction;
   private boolean cellBlock;
 
-  MultiServerCallable(final ClusterConnection connection, final TableName tableName,
+  MultiServerCallable(final ConnectionImplementation connection, final TableName tableName,
       final ServerName location, final MultiAction multi, RpcController rpcController,
       int rpcTimeout, RetryingTimeTracker tracker, int priority) {
     super(connection, tableName, null, rpcController, rpcTimeout, tracker, priority);
@@ -141,7 +141,7 @@ class MultiServerCallable extends CancellableRegionServerCallable<MultiResponse>
   private boolean isCellBlock() {
     // This is not exact -- the configuration could have changed on us after connection was set up
     // but it will do for now.
-    ClusterConnection conn = getConnection();
+    ConnectionImplementation conn = getConnection();
     return conn.hasCellBlockSupport();
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java
index 2da8422..69f4f4a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java
@@ -46,8 +46,8 @@ public abstract class NoncedRegionServerCallable<T> extends ClientServiceCallabl
    * @param tableName Table name to which <code>row</code> belongs.
    * @param row The row we want in <code>tableName</code>.
    */
-  public NoncedRegionServerCallable(Connection connection, TableName tableName, byte [] row,
-      HBaseRpcController rpcController, int priority) {
+  public NoncedRegionServerCallable(ConnectionImplementation connection, TableName tableName,
+      byte[] row, HBaseRpcController rpcController, int priority) {
     super(connection, tableName, row, rpcController, priority);
     this.nonce = getConnection().getNonceGenerator().newNonce();
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
index 448302c..80371b7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
@@ -46,7 +46,7 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel {
   private static final Logger LOG = LoggerFactory.getLogger(RegionCoprocessorRpcChannel.class);
   private final TableName table;
   private final byte [] row;
-  private final ClusterConnection conn;
+  private final ConnectionImplementation conn;
   private byte[] lastRegion;
   private final int operationTimeout;
   private final RpcRetryingCallerFactory rpcCallerFactory;
@@ -57,7 +57,7 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel {
    * @param table to connect to
    * @param row to locate region with
    */
-  RegionCoprocessorRpcChannel(ClusterConnection conn, TableName table, byte[] row) {
+  RegionCoprocessorRpcChannel(ConnectionImplementation conn, TableName table, byte[] row) {
     this.table = table;
     this.row = row;
     this.conn = conn;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index 264304e..009544c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -27,11 +26,12 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 
 /**
  * Implementations make a RPC call against a RegionService via a protobuf Service.
@@ -75,12 +75,12 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
    * @param tableName Table name to which <code>row</code> belongs.
    * @param row The row we want in <code>tableName</code>.
    */
-  public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row,
+  public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte[] row,
       RpcController rpcController) {
     this(connection, tableName, row, rpcController, HConstants.NORMAL_QOS);
   }
 
-  public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row,
+  public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte[] row,
       RpcController rpcController, int priority) {
     super();
     this.connection = connection;
@@ -160,7 +160,7 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
   }
 
   /**
-   * @return {@link ClusterConnection} instance used by this Callable.
+   * @return {@link ConnectionImplementation} instance used by this Callable.
    */
   protected ConnectionImplementation getConnection() {
     return this.connection;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
index 34c24c0..2ed037e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
@@ -37,13 +37,6 @@ public class ReversedClientScanner extends ClientScanner {
   /**
    * Create a new ReversibleClientScanner for the specified table Note that the passed
    * {@link Scan}'s start row maybe changed.
-   * @param conf
-   * @param scan
-   * @param tableName
-   * @param connection
-   * @param pool
-   * @param primaryOperationTimeout
-   * @throws IOException
    */
   public ReversedClientScanner(Configuration conf, Scan scan, TableName tableName,
       ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
index 30e541c..6a325b2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
@@ -52,8 +52,8 @@ public class ReversedScannerCallable extends ScannerCallable {
    * @param rpcFactory to create an {@link com.google.protobuf.RpcController} to talk to the
    *          regionserver
    */
-  public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
-      ScanMetrics scanMetrics, RpcControllerFactory rpcFactory) {
+  public ReversedScannerCallable(ConnectionImplementation connection, TableName tableName,
+      Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory) {
     super(connection, tableName, scan, scanMetrics, rpcFactory);
   }
 
@@ -66,8 +66,8 @@ public class ReversedScannerCallable extends ScannerCallable {
    *          regionserver
    * @param replicaId the replica id
    */
-  public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
-      ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) {
+  public ReversedScannerCallable(ConnectionImplementation connection, TableName tableName,
+      Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) {
     super(connection, tableName, scan, scanMetrics, rpcFactory, replicaId);
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index 45b74ef..bf7135f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -101,23 +101,24 @@ public class ScannerCallable extends ClientServiceCallable<Result[]> {
    * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect
    *          metrics
    * @param rpcControllerFactory factory to use when creating
-   *        {@link com.google.protobuf.RpcController}
+   *          {@link com.google.protobuf.RpcController}
    */
-  public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
+  public ScannerCallable(ConnectionImplementation connection, TableName tableName, Scan scan,
       ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) {
     this(connection, tableName, scan, scanMetrics, rpcControllerFactory, 0);
   }
+
   /**
-   *
    * @param connection
    * @param tableName
    * @param scan
    * @param scanMetrics
    * @param id the replicaId
    */
-  public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
+  public ScannerCallable(ConnectionImplementation connection, TableName tableName, Scan scan,
       ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory, int id) {
-    super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(), scan.getPriority());
+    super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(),
+      scan.getPriority());
     this.id = id;
     this.scan = scan;
     this.scanMetrics = scanMetrics;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
index 27e5f87..db956ce 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
@@ -76,7 +76,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
   public ScannerCallableWithReplicas(TableName tableName, ConnectionImplementation cConnection,
       ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan,
       int retries, int scannerTimeout, int caching, Configuration conf,
-      RpcRetryingCaller<Result []> caller) {
+      RpcRetryingCaller<Result[]> caller) {
     this.currentScannerCallable = baseCallable;
     this.cConnection = cConnection;
     this.pool = pool;
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index bd6f03c..02e4c46 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -1124,11 +1124,8 @@ public class TestAsyncProcess {
                1,    BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS);
   }
 
-  private void checkPeriodicFlushParameters(ClusterConnection conn,
-                                            MyAsyncProcess ap,
-                                            long setTO, long expectTO,
-                                            long setTT, long expectTT
-                                            ) {
+  private void checkPeriodicFlushParameters(ConnectionImplementation conn, MyAsyncProcess ap,
+      long setTO, long expectTO, long setTT, long expectTT) {
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
 
     // The BufferedMutatorParams does nothing with the value
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
index f0375e2..647ea32 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
@@ -44,8 +44,7 @@ public class TestBufferedMutator {
   public TestName name = new TestName();
 
   /**
-   * My BufferedMutator.
-   * Just to prove that I can insert a BM other than default.
+   * My BufferedMutator. Just to prove that I can insert a BM other than default.
    */
   public static class MyBufferedMutator extends BufferedMutatorImpl {
     MyBufferedMutator(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory,
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 5a5d6d0..cb60695 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Comparator;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Objects;
@@ -28,7 +29,6 @@ import java.util.TreeSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterManager.ServiceType;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -37,10 +37,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-
 /**
  * Manages the interactions with an already deployed distributed cluster (as opposed to
  * a pseudo-distributed, or mini/local cluster). This is used by integration and system tests.
@@ -100,18 +96,6 @@ public class DistributedHBaseCluster extends HBaseCluster {
   }
 
   @Override
-  public AdminProtos.AdminService.BlockingInterface getAdminProtocol(ServerName serverName)
-  throws IOException {
-    return ((ClusterConnection)this.connection).getAdmin(serverName);
-  }
-
-  @Override
-  public ClientProtos.ClientService.BlockingInterface getClientProtocol(ServerName serverName)
-  throws IOException {
-    return ((ClusterConnection)this.connection).getClient(serverName);
-  }
-
-  @Override
   public void startRegionServer(String hostname, int port) throws IOException {
     LOG.info("Starting RS on: " + hostname);
     clusterManager.start(ServiceType.HBASE_REGIONSERVER, hostname, port);
@@ -262,13 +246,6 @@ public class DistributedHBaseCluster extends HBaseCluster {
     throw new IOException("did timeout waiting for service to start:" + serverName);
   }
 
-
-  @Override
-  public MasterService.BlockingInterface getMasterAdminService()
-  throws IOException {
-    return ((ClusterConnection)this.connection).getMaster();
-  }
-
   @Override
   public void startMaster(String hostname, int port) throws IOException {
     LOG.info("Starting Master on: " + hostname + ":" + port);
@@ -297,7 +274,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
     long start = System.currentTimeMillis();
     while (System.currentTimeMillis() - start < timeout) {
       try {
-        getMasterAdminService();
+        connection.getAdmin().getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION));
         return true;
       } catch (MasterNotRunningException m) {
         LOG.warn("Master not started yet " + m);
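
With getMasterAdminService() gone, the cluster wrapper probes master availability through the public Admin API instead: a cheap call that fetches only the HBASE_VERSION metric fails with MasterNotRunningException until the master is up. A condensed sketch of the retry loop around the patched call above (method name and sleep interval are illustrative):

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Threads;

public final class MasterProbe {
  static boolean waitForActiveMaster(Connection connection, long timeoutMs) throws IOException {
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < timeoutMs) {
      try (Admin admin = connection.getAdmin()) {
        // Requesting a single, cheap metric keeps the probe lightweight while still
        // requiring a round trip to the active master.
        admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION));
        return true;
      } catch (MasterNotRunningException e) {
        Threads.sleep(1000); // master not up yet; retry until the timeout elapses
      }
    }
    return false;
  }
}
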
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
index 906abca..eff26d7 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
@@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -98,7 +98,7 @@ public class TestMultiTableInputFormatBase {
     // canned responses.
     JobContext mockedJobContext = Mockito.mock(JobContext.class);
     Configuration c = HBaseConfiguration.create();
-    c.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName());
+    c.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName());
     Mockito.when(mockedJobContext.getConfiguration()).thenReturn(c);
     // Invent a bunch of scans. Have each Scan go against a different table so a good spread.
     List<Scan> scans = new ArrayList<>();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
index 4436ee1..944bd10 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.anyBoolean;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -90,7 +90,7 @@ public class TestTableInputFormatBase {
   public void testNonSuccessiveSplitsAreNotMerged() throws IOException {
     JobContext context = mock(JobContext.class);
     Configuration conf = HBaseConfiguration.create();
-    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
+    conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
         ConnectionForMergeTesting.class.getName());
     conf.set(TableInputFormat.INPUT_TABLE, "testTable");
     conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index c33d5af..e0e95df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.AsyncConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -55,14 +54,6 @@ public interface Server extends Abortable, Stoppable {
   Connection createConnection(Configuration conf) throws IOException;
 
   /**
-   * Returns a reference to the servers' cluster connection. Prefer {@link #getConnection()}.
-   *
-   * Important note: this method returns a reference to Connection which is managed
-   * by Server itself, so callers must NOT attempt to close connection obtained.
-   */
-  ClusterConnection getClusterConnection();
-
-  /**
    * Returns a reference to the servers' async connection.
    * <p/>
    * Important note: this method returns a reference to Connection which is managed by Server
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
index 142788e..af0d560 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
@@ -18,14 +18,13 @@
 package org.apache.hadoop.hbase.backup.example;
 
 import java.io.IOException;
-
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -36,9 +35,9 @@ public class ZKTableArchiveClient extends Configured {
 
   /** Configuration key for the archive node. */
   private static final String ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY = "zookeeper.znode.hfile.archive";
-  private ClusterConnection connection;
+  private Connection connection;
 
-  public ZKTableArchiveClient(Configuration conf, ClusterConnection connection) {
+  public ZKTableArchiveClient(Configuration conf, Connection connection) {
     super(conf);
     this.connection = connection;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index c3f8f8b..45dc8be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -64,7 +64,7 @@ public interface AsyncClusterConnection extends AsyncConnection {
       List<Entry> entries, int replicaId, int numRetries, long operationTimeoutNs);
 
   /**
-   * Return all the replicas for a region. Used for regiong replica replication.
+   * Return all the replicas for a region. Used for region replica replication.
    */
   CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
       boolean reload);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 10f3632..204e380 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1170,7 +1170,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     if (QuotaUtil.isQuotaEnabled(conf)) {
       // Create the quota snapshot notifier
       spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
-      spaceQuotaSnapshotNotifier.initialize(getClusterConnection());
+      spaceQuotaSnapshotNotifier.initialize(getConnection());
       this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
       // Start the chore to read the region FS space reports and act on them
       getChoreService().scheduleChore(quotaObserverChore);
@@ -1267,7 +1267,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    */
   private boolean waitForNamespaceOnline() throws InterruptedException, IOException {
     TableState nsTableState =
-      MetaTableAccessor.getTableState(getClusterConnection(), TableName.NAMESPACE_TABLE_NAME);
+      MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME);
     if (nsTableState == null || nsTableState.isDisabled()) {
       // this means we have already migrated the data and disabled or deleted the namespace table,
       // or this is a new deploy which does not have a namespace table from the beginning.
@@ -1857,7 +1857,7 @@ public class HMaster extends HRegionServer implements MasterServices {
         List<NormalizationPlan> plans = this.normalizer.computePlanForTable(table);
         if (plans != null) {
           for (NormalizationPlan plan : plans) {
-            plan.execute(clusterConnection.getAdmin());
+            plan.execute(connection.getAdmin());
             if (plan.getType() == PlanType.SPLIT) {
               splitPlanCount++;
             } else if (plan.getType() == PlanType.MERGE) {
@@ -3058,8 +3058,8 @@ public class HMaster extends HRegionServer implements MasterServices {
     // this is what we want especially if the Master is in startup phase doing call outs to
     // hbase:meta, etc. when cluster is down. Without this connection close, we'd have to wait on
     // the rpc to timeout.
-    if (this.clusterConnection != null) {
-      this.clusterConnection.close();
+    if (this.connection != null) {
+      this.connection.close();
     }
     if (this.asyncClusterConnection != null) {
       this.asyncClusterConnection.close();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
index 4f9343c..6938410 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
@@ -28,19 +28,16 @@ import java.util.stream.Stream;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 
@@ -66,22 +63,15 @@ final class AssignmentManagerUtil {
   static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
       final ServerName regionLocation, final RegionInfo hri, boolean includeBestSplitRow)
       throws IOException {
-    // TODO: There is no timeout on this controller. Set one!
-    HBaseRpcController controller =
-      env.getMasterServices().getClusterConnection().getRpcControllerFactory().newController();
-    final AdminService.BlockingInterface admin =
-      env.getMasterServices().getClusterConnection().getAdmin(regionLocation);
+    AsyncRegionServerAdmin admin =
+      env.getMasterServices().getAsyncClusterConnection().getRegionServerAdmin(regionLocation);
     GetRegionInfoRequest request = null;
     if (includeBestSplitRow) {
       request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName(), false, true);
     } else {
       request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName());
     }
-    try {
-      return admin.getRegionInfo(controller, request);
-    } catch (ServiceException e) {
-      throw ProtobufUtil.handleRemoteException(e);
-    }
+    return FutureUtils.get(admin.getRegionInfo(request));
   }
 
   private static void lock(List<RegionStateNode> regionNodes) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
index 3972700..b9797bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
@@ -90,7 +90,7 @@ public class RegionServerSpaceQuotaManager {
       return;
     }
     // Start the chores
-    this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getClusterConnection());
+    this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getConnection());
     rsServices.getChoreService().scheduleChore(spaceQuotaRefresher);
     this.regionSizeReporter = new RegionSizeReportingChore(rsServices);
     rsServices.getChoreService().scheduleChore(regionSizeReporter);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java
index b325f66..fae0e81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java
@@ -17,12 +17,11 @@
 package org.apache.hadoop.hbase.quotas.policies;
 
 import java.io.IOException;
-
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.quotas.SpaceLimitingException;
 import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
 import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A {@link SpaceViolationPolicyEnforcement} which disables the table. The enforcement counterpart
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 7c9141e..7141b87 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -84,7 +84,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
@@ -277,14 +276,17 @@ public class HRegionServer extends HasThread implements
   protected HeapMemoryManager hMemManager;
 
   /**
-   * Cluster connection to be shared by services.
+   * Connection to be shared by services.
+   * <p/>
    * Initialized at server startup and closed when server shuts down.
+   * <p/>
    * Clients must never close it explicitly.
-   * Clients hosted by this Server should make use of this clusterConnection rather than create
-   * their own; if they create their own, there is no way for the hosting server to shutdown
-   * ongoing client RPCs.
+   * <p/>
+   * Clients hosted by this Server should make use of this connection rather than create their own;
+   * if they create their own, there is no way for the hosting server to shutdown ongoing client
+   * RPCs.
    */
-  protected ClusterConnection clusterConnection;
+  protected Connection connection;
 
   /**
    * The asynchronous cluster connection to be shared by services.
@@ -829,11 +831,11 @@ public class HRegionServer extends HasThread implements
    * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
    * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
    */
-  private ClusterConnection createClusterConnection() throws IOException {
+  private Connection createConnection() throws IOException {
     // Create a cluster connection that when appropriate, can short-circuit and go directly to the
     // local server if the request is to the local server bypassing RPC. Can be used for both local
     // and remote invocations.
-    ClusterConnection conn =
+    Connection conn =
       ConnectionUtils.createShortCircuitConnection(unsetClientZookeeperQuorum(), null,
         userProvider.getCurrent(), serverName, rpcServices, rpcServices);
     // This is used to initialize the batch thread pool inside the connection implementation.
@@ -870,8 +872,8 @@ public class HRegionServer extends HasThread implements
    * Setup our cluster connection if not already initialized.
    */
   protected final synchronized void setupClusterConnection() throws IOException {
-    if (clusterConnection == null) {
-      clusterConnection = createClusterConnection();
+    if (connection == null) {
+      connection = createConnection();
       asyncClusterConnection =
         ClusterConnectionFactory.createAsyncClusterConnection(unsetClientZookeeperQuorum(),
           new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), userProvider.getCurrent());
@@ -1128,9 +1130,9 @@ public class HRegionServer extends HasThread implements
       LOG.info("stopping server " + this.serverName);
     }
 
-    if (this.clusterConnection != null && !clusterConnection.isClosed()) {
+    if (this.connection != null && !connection.isClosed()) {
       try {
-        this.clusterConnection.close();
+        this.connection.close();
       } catch (IOException e) {
         // Although the {@link Closeable} interface throws an {@link
         // IOException}, in reality, the implementation would never do that.
@@ -2201,12 +2203,7 @@ public class HRegionServer extends HasThread implements
 
   @Override
   public Connection getConnection() {
-    return getClusterConnection();
-  }
-
-  @Override
-  public ClusterConnection getClusterConnection() {
-    return this.clusterConnection;
+    return this.connection;
   }
 
   @Override
@@ -2312,7 +2309,7 @@ public class HRegionServer extends HasThread implements
           }
         } else {
           try {
-            MetaTableAccessor.updateRegionLocation(clusterConnection,
+            MetaTableAccessor.updateRegionLocation(connection,
               hris[0], serverName, openSeqNum, masterSystemTime);
           } catch (IOException e) {
             LOG.info("Failed to update meta", e);
@@ -2343,7 +2340,7 @@ public class HRegionServer extends HasThread implements
     // Keep looping till we get an error. We want to send reports even though server is going down.
     // Only go down if connection is null. It is set to null almost as the last thing as the
     // HRegionServer goes down.
-    while (this.clusterConnection != null && !this.clusterConnection.isClosed()) {
+    while (this.connection != null && !this.connection.isClosed()) {
       RegionServerStatusService.BlockingInterface rss = rssStub;
       try {
         if (rss == null) {
@@ -3837,7 +3834,7 @@ public class HRegionServer extends HasThread implements
 
   @Override
   public void unassign(byte[] regionName) throws IOException {
-    clusterConnection.getAdmin().unassign(regionName, false);
+    connection.getAdmin().unassign(regionName, false);
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 432dbcd..a415477 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
@@ -208,7 +208,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
 
     Configuration conf = getConf();
     HBaseAdmin.available(conf);
-    ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
+    Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin();
 
     ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(),
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index 7d1245c..bbd7675 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -153,7 +152,7 @@ public class ReplicationSyncUp extends Configured implements Tool {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -163,11 +162,6 @@ public class ReplicationSyncUp extends Configured implements Tool {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 30314b7..b3f11da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -161,9 +160,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
 
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
 /**
  * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
  * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
@@ -245,7 +241,7 @@ public class HBaseFsck extends Configured implements Closeable {
    **********************/
   private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());
   private ClusterMetrics status;
-  private ClusterConnection connection;
+  private Connection connection;
   private Admin admin;
   private Table meta;
   // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions
@@ -585,7 +581,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
     LOG.info("Launching hbck");
 
-    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());
+    connection = ConnectionFactory.createConnection(getConf());
     admin = connection.getAdmin();
     meta = connection.getTable(TableName.META_TABLE_NAME);
     status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,
@@ -4332,10 +4328,10 @@ public class HBaseFsck extends Configured implements Closeable {
     private final HBaseFsck hbck;
     private final ServerName rsinfo;
     private final ErrorReporter errors;
-    private final ClusterConnection connection;
+    private final Connection connection;
 
     WorkItemRegion(HBaseFsck hbck, ServerName info,
-                   ErrorReporter errors, ClusterConnection connection) {
+                   ErrorReporter errors, Connection connection) {
       this.hbck = hbck;
       this.rsinfo = info;
       this.errors = errors;
@@ -4346,32 +4342,29 @@ public class HBaseFsck extends Configured implements Closeable {
     public synchronized Void call() throws IOException {
       errors.progress();
       try {
-        BlockingInterface server = connection.getAdmin(rsinfo);
-
         // list all online regions from this region server
-        List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
+        List<RegionInfo> regions = connection.getAdmin().getRegions(rsinfo);
         regions = filterRegions(regions);
 
         if (details) {
-          errors.detail("RegionServer: " + rsinfo.getServerName() +
-                           " number of regions: " + regions.size());
-          for (RegionInfo rinfo: regions) {
-            errors.detail("  " + rinfo.getRegionNameAsString() +
-                             " id: " + rinfo.getRegionId() +
-                             " encoded_name: " + rinfo.getEncodedName() +
-                             " start: " + Bytes.toStringBinary(rinfo.getStartKey()) +
-                             " end: " + Bytes.toStringBinary(rinfo.getEndKey()));
+          errors.detail(
+            "RegionServer: " + rsinfo.getServerName() + " number of regions: " + regions.size());
+          for (RegionInfo rinfo : regions) {
+            errors.detail("  " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() +
+              " encoded_name: " + rinfo.getEncodedName() + " start: " +
+              Bytes.toStringBinary(rinfo.getStartKey()) + " end: " +
+              Bytes.toStringBinary(rinfo.getEndKey()));
           }
         }
 
         // check to see if the existence of this region matches the region in META
-        for (RegionInfo r:regions) {
+        for (RegionInfo r : regions) {
           HbckInfo hbi = hbck.getOrCreateInfo(r.getEncodedName());
           hbi.addServer(r, rsinfo);
         }
-      } catch (IOException e) {          // unable to connect to the region server.
-        errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "RegionServer: " + rsinfo.getServerName() +
-          " Unable to fetch region information. " + e);
+      } catch (IOException e) { // unable to connect to the region server.
+        errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE,
+          "RegionServer: " + rsinfo.getServerName() + " Unable to fetch region information. " + e);
         throw e;
       }
       return null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 121d06c..d4a28c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index 5805793..d095fa3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -26,19 +26,17 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Provides ability to create multiple Connection instances and allows to process a batch of
@@ -112,14 +110,11 @@ public class MultiHConnection {
    * @param callback to run when results are in
    * @throws IOException If IO failure occurs
    */
-  @SuppressWarnings("deprecation")
   public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName,
       Object[] results, Batch.Callback<R> callback) throws IOException {
     // Currently used by RegionStateStore
-    ClusterConnection conn =
-      (ClusterConnection) connections[ThreadLocalRandom.current().nextInt(noOfConnections)];
-
-    HTable.doBatchWithCallback(actions, results, callback, conn, batchPool, tableName);
+    HTable.doBatchWithCallback(actions, results, callback,
+      connections[ThreadLocalRandom.current().nextInt(noOfConnections)], batchPool, tableName);
   }
 
   // Copied from ConnectionImplementation.getBatchPool()
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index a779d36..540e7f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -550,7 +550,7 @@ public class RegionSplitter {
                   }
 
                   // make sure this region wasn't already split
-                  byte[] sk = regionLoc.getRegionInfo().getStartKey();
+                  byte[] sk = regionLoc.getRegion().getStartKey();
                   if (sk.length != 0) {
                     if (Bytes.equals(split, sk)) {
                       LOG.debug("Region already split on "
@@ -712,7 +712,6 @@ public class RegionSplitter {
       htd = table.getDescriptor();
     }
     try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
-
       // for every region that hasn't been verified as a finished split
       for (Pair<byte[], byte[]> region : regionList) {
         byte[] start = region.getFirst();
@@ -720,7 +719,7 @@ public class RegionSplitter {
 
         // see if the new split daughter region has come online
         try {
-          HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo();
+          RegionInfo dri = regionLocator.getRegionLocation(split, true).getRegion();
           if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
             logicalSplitting.add(region);
             continue;
@@ -735,10 +734,10 @@ public class RegionSplitter {
         try {
           // when a daughter region is opened, a compaction is triggered
           // wait until compaction completes for both daughter regions
-          LinkedList<HRegionInfo> check = Lists.newLinkedList();
-          check.add(regionLocator.getRegionLocation(start).getRegionInfo());
-          check.add(regionLocator.getRegionLocation(split).getRegionInfo());
-          for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) {
+          LinkedList<RegionInfo> check = Lists.newLinkedList();
+          check.add(regionLocator.getRegionLocation(start).getRegion());
+          check.add(regionLocator.getRegionLocation(split).getRegion());
+          for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) {
             byte[] sk = hri.getStartKey();
             if (sk.length == 0)
               sk = splitAlgo.firstRow();
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index ced5b44..e52d8b2 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -261,7 +261,7 @@ if ( fqtn != null ) {
         stateMap.put(regionInfo.getEncodedName(), regionState);
     }
   }
-  RegionLocator r = master.getClusterConnection().getRegionLocator(table.getName());
+  RegionLocator r = master.getConnection().getRegionLocator(table.getName());
   try { %>
 <h2>Table Attributes</h2>
 <table class="table table-striped">
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
index d760a7d..8020553 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase;
 
 import java.io.Closeable;
 import java.io.IOException;
-
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -28,10 +27,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-
 /**
  * This class defines methods that can help with managing HBase clusters
  * from unit tests and system tests. There are 3 types of cluster deployments:
@@ -98,24 +93,6 @@ public abstract class HBaseCluster implements Closeable, Configurable {
   }
 
   /**
-   * Returns an {@link MasterService.BlockingInterface} to the active master
-   */
-  public abstract MasterService.BlockingInterface getMasterAdminService()
-  throws IOException;
-
-  /**
-   * Returns an AdminProtocol interface to the regionserver
-   */
-  public abstract AdminService.BlockingInterface getAdminProtocol(ServerName serverName)
-  throws IOException;
-
-  /**
-   * Returns a ClientProtocol interface to the regionserver
-   */
-  public abstract ClientService.BlockingInterface getClientProtocol(ServerName serverName)
-  throws IOException;
-
-  /**
    * Starts a new region server on the given hostname or if this is a mini/local cluster,
    * starts a region server locally.
    * @param hostname the hostname to start the regionserver on
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 473eb74..92cfddf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -24,7 +24,6 @@ import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -42,9 +41,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 
 /**
@@ -521,15 +517,6 @@ public class MiniHBaseCluster extends HBaseCluster {
    * Returns the current active master, if available.
    * @return the active HMaster, null if none is active.
    */
-  @Override
-  public MasterService.BlockingInterface getMasterAdminService() {
-    return this.hbaseCluster.getActiveMaster().getMasterRpcServices();
-  }
-
-  /**
-   * Returns the current active master, if available.
-   * @return the active HMaster, null if none is active.
-   */
   public HMaster getMaster() {
     return this.hbaseCluster.getActiveMaster();
   }
@@ -921,15 +908,4 @@ public class MiniHBaseCluster extends HBaseCluster {
     }
     return -1;
   }
-
-  @Override
-  public AdminService.BlockingInterface getAdminProtocol(ServerName serverName) throws IOException {
-    return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices();
-  }
-
-  @Override
-  public ClientService.BlockingInterface getClientProtocol(ServerName serverName)
-  throws IOException {
-    return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices();
-  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index ade9cde..77b2b91 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -32,7 +32,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.locking.EntityLock;
@@ -162,7 +161,7 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public ClusterConnection getConnection() {
+  public Connection getConnection() {
     return null;
   }
 
@@ -266,7 +265,6 @@ public class MockRegionServerServices implements RegionServerServices {
 
   @Override
   public ServerNonceManager getNonceManager() {
-    // TODO Auto-generated method stub
     return null;
   }
 
@@ -277,7 +275,6 @@ public class MockRegionServerServices implements RegionServerServices {
 
   @Override
   public boolean registerService(Service service) {
-    // TODO Auto-generated method stub
     return false;
   }
 
@@ -292,11 +289,6 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public ClusterConnection getClusterConnection() {
-    return null;
-  }
-
-  @Override
   public ThroughputController getFlushThroughputController() {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 16f3930..618fe74 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
@@ -86,7 +86,7 @@ public class TestZooKeeperTableArchiveClient {
   private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
   private static ZKTableArchiveClient archivingClient;
   private final List<Path> toCleanup = new ArrayList<>();
-  private static ClusterConnection CONNECTION;
+  private static Connection CONNECTION;
   private static RegionServerServices rss;
 
   /**
@@ -96,7 +96,7 @@ public class TestZooKeeperTableArchiveClient {
   public static void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
     UTIL.startMiniZKCluster();
-    CONNECTION = (ClusterConnection)ConnectionFactory.createConnection(UTIL.getConfiguration());
+    CONNECTION = ConnectionFactory.createConnection(UTIL.getConfiguration());
     archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION);
     // make hfile archiving node so we can archive files
     ZKWatcher watcher = UTIL.getZooKeeperWatcher();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 2a5a395..2c129a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -57,11 +57,11 @@ public class HConnectionTestingUtility {
    * @throws ZooKeeperConnectionException
    */
   public static ConnectionImplementation getMockedConnection(final Configuration conf)
-  throws ZooKeeperConnectionException {
+      throws ZooKeeperConnectionException {
     ConnectionImplementation connection = Mockito.mock(ConnectionImplementation.class);
     Mockito.when(connection.getConfiguration()).thenReturn(conf);
-    Mockito.when(connection.getRpcControllerFactory()).thenReturn(
-      Mockito.mock(RpcControllerFactory.class));
+    Mockito.when(connection.getRpcControllerFactory())
+      .thenReturn(Mockito.mock(RpcControllerFactory.class));
     // we need a real retrying caller
     RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf);
     Mockito.when(connection.getRpcRetryingCallerFactory()).thenReturn(callerFactory);
@@ -81,11 +81,10 @@ public class HConnectionTestingUtility {
    *          the mocked connection
    * @return Mock up a connection that returns a {@link Configuration} when
    *         {@link ConnectionImplementation#getConfiguration()} is called, a 'location' when
-   *         {@link ConnectionImplementation#getRegionLocation(TableName,byte[], boolean)}
-   *         is called, and that returns the passed
-   *         {@link AdminProtos.AdminService.BlockingInterface} instance when
-   *         {@link ConnectionImplementation#getAdmin(ServerName)} is called, returns the passed
-   *         {@link ClientProtos.ClientService.BlockingInterface} instance when
+   *         {@link ConnectionImplementation#getRegionLocation(TableName, byte[], boolean)} is
+   *         called, and that returns the passed {@link AdminProtos.AdminService.BlockingInterface}
+   *         instance when {@link ConnectionImplementation#getAdmin(ServerName)} is called, returns
+   *         the passed {@link ClientProtos.ClientService.BlockingInterface} instance when
    *         {@link ConnectionImplementation#getClient(ServerName)} is called (Be sure to call
    *         {@link Connection#close()} when done with this mocked Connection.
    */
@@ -138,9 +137,7 @@ public class HConnectionTestingUtility {
    * calling {@link Connection#close()} else it will stick around; this is probably not what you
    * want.
    * @param conf configuration
-   * @return ConnectionImplementation object for <code>conf</code>
-   * @throws ZooKeeperConnectionException [Dead link]: See also
-   *           {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
+   * @return ConnectionImplementation object for <code>conf</code>
    */
   public static ConnectionImplementation getSpiedConnection(final Configuration conf)
       throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index ba11858..efdf187 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -634,8 +634,8 @@ public class TestAdmin1 {
     assertFalse(ADMIN.tableExists(tableName));
   }
 
-  protected void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator, int
-      expectedRegions) throws IOException {
+  private void verifyRoundRobinDistribution(ConnectionImplementation c,
+      RegionLocator regionLocator, int expectedRegions) throws IOException {
     int numRS = c.getCurrentNrHRS();
     List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
     Map<ServerName, List<RegionInfo>> server2Regions = new HashMap<>();
@@ -654,13 +654,14 @@ public class TestAdmin1 {
       // which contains less regions by intention.
       numRS--;
     }
-    float average = (float) expectedRegions/numRS;
-    int min = (int)Math.floor(average);
-    int max = (int)Math.ceil(average);
+    float average = (float) expectedRegions / numRS;
+    int min = (int) Math.floor(average);
+    int max = (int) Math.ceil(average);
     for (List<RegionInfo> regionList : server2Regions.values()) {
-      assertTrue("numRS=" + numRS + ", min=" + min + ", max=" + max +
-        ", size=" + regionList.size() + ", tablesOnMaster=" + tablesOnMaster,
-      regionList.size() == min || regionList.size() == max);
+      assertTrue(
+        "numRS=" + numRS + ", min=" + min + ", max=" + max + ", size=" + regionList.size() +
+          ", tablesOnMaster=" + tablesOnMaster,
+        regionList.size() == min || regionList.size() == max);
     }
   }
 
@@ -740,7 +741,7 @@ public class TestAdmin1 {
     List<HRegionLocation> regions;
     Iterator<HRegionLocation> hris;
     RegionInfo hri;
-    ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
+    ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection();
     try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
       regions = l.getAllRegionLocations();
 
@@ -1241,13 +1242,9 @@ public class TestAdmin1 {
       byte[][] nameofRegionsToMerge = new byte[2][];
       nameofRegionsToMerge[0] =  regions.get(1).getFirst().getEncodedNameAsBytes();
       nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
-      MergeTableRegionsRequest request = RequestConverter
-          .buildMergeTableRegionsRequest(
-            nameofRegionsToMerge,
-            true,
-            HConstants.NO_NONCE,
-            HConstants.NO_NONCE);
-      ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster()
+      MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
+        nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
+      ((ConnectionImplementation) TEST_UTIL.getAdmin().getConnection()).getMaster()
         .mergeTableRegions(null, request);
     } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException m) {
       Throwable t = m.getCause();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index f4aafc0..6852718 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -769,7 +769,7 @@ public class TestAdmin2 {
     Assert.assertNotNull(store);
     Assert.assertEquals(expectedStoreFilesSize, store.getSize());
 
-    ClusterConnection conn = ((ClusterConnection) ADMIN.getConnection());
+    ConnectionImplementation conn = (ConnectionImplementation) ADMIN.getConnection();
     HBaseRpcController controller = conn.getRpcControllerFactory().newController();
     for (int i = 0; i < 10; i++) {
       RegionInfo ri =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 1f62731..0f08f44 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -274,7 +274,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
 
   private void verifyRoundRobinDistribution(List<HRegionLocation> regions, int expectedRegions)
       throws IOException {
-    int numRS = ((ClusterConnection) TEST_UTIL.getConnection()).getCurrentNrHRS();
+    int numRS = ((ConnectionImplementation) TEST_UTIL.getConnection()).getCurrentNrHRS();
 
     Map<ServerName, List<RegionInfo>> server2Regions = new HashMap<>();
     regions.stream().forEach((loc) -> {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
index cd27a30..fd0eb7b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
@@ -94,9 +94,9 @@ public class TestCISleep extends AbstractTestCITimeout {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     TEST_UTIL.createTable(tableName, FAM_NAM);
     ClientServiceCallable<Object> regionServerCallable =
-      new ClientServiceCallable<Object>(TEST_UTIL.getConnection(), tableName, FAM_NAM,
-          new RpcControllerFactory(TEST_UTIL.getConfiguration()).newController(),
-          HConstants.PRIORITY_UNSET) {
+      new ClientServiceCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
+        tableName, FAM_NAM, new RpcControllerFactory(TEST_UTIL.getConfiguration()).newController(),
+        HConstants.PRIORITY_UNSET) {
         @Override
         protected Object rpcCall() throws Exception {
           return null;
@@ -126,9 +126,9 @@ public class TestCISleep extends AbstractTestCITimeout {
       assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f));
     }
 
-    try (
-      MasterCallable<Object> masterCallable = new MasterCallable<Object>(TEST_UTIL.getConnection(),
-          new RpcControllerFactory(TEST_UTIL.getConfiguration())) {
+    try (MasterCallable<Object> masterCallable =
+      new MasterCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
+        new RpcControllerFactory(TEST_UTIL.getConfiguration())) {
         @Override
         protected Object rpcCall() throws Exception {
           return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
index b1ad866..8a4c065 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
@@ -831,7 +831,7 @@ public class TestConnectionImplementation {
    *  from ZK by the client.
    */
   @Test
-  public void testConnection() throws Exception{
+  public void testConnection() throws Exception {
     // We create an empty config and add the ZK address.
     Configuration c = new Configuration();
     c.set(HConstants.ZOOKEEPER_QUORUM,
@@ -840,7 +840,8 @@ public class TestConnectionImplementation {
       TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT));
 
     // This should be enough to connect
-    ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(c);
+    ConnectionImplementation conn =
+      (ConnectionImplementation) ConnectionFactory.createConnection(c);
     assertTrue(conn.isMasterRunning());
     conn.close();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index 9ec7b96..f32123d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -154,7 +154,7 @@ public class TestFromClientSide3 {
       // connection needed for poll-wait
       HRegionLocation loc = locator.getRegionLocation(row, true);
       AdminProtos.AdminService.BlockingInterface server =
-        ((ClusterConnection) admin.getConnection()).getAdmin(loc.getServerName());
+        ((ConnectionImplementation) admin.getConnection()).getAdmin(loc.getServerName());
       byte[] regName = loc.getRegionInfo().getRegionName();
 
       for (int i = 0; i < nFlushes; i++) {
@@ -276,7 +276,7 @@ public class TestFromClientSide3 {
     try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) {
       TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS);
       try (Admin admin = TEST_UTIL.getAdmin()) {
-        ClusterConnection connection = (ClusterConnection) TEST_UTIL.getConnection();
+        ConnectionImplementation connection = (ConnectionImplementation) TEST_UTIL.getConnection();
 
         // Create 3 store files.
         byte[] row = Bytes.toBytes(random.nextInt());
@@ -655,7 +655,7 @@ public class TestFromClientSide3 {
 
   @Test
   public void testConnectionDefaultUsesCodec() throws Exception {
-    ClusterConnection con = (ClusterConnection) TEST_UTIL.getConnection();
+    ConnectionImplementation con = (ConnectionImplementation) TEST_UTIL.getConnection();
     assertTrue(con.hasCellBlockSupport());
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
index 53f5064..108ab7f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
@@ -107,15 +107,13 @@ public class TestMetaTableAccessorNoCluster {
     Result r = Result.create(kvs);
     assertNull(MetaTableAccessor.getRegionInfo(r));
 
-    byte [] f = HConstants.CATALOG_FAMILY;
+    byte[] f = HConstants.CATALOG_FAMILY;
     // Make a key value that doesn't have the expected qualifier.
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
-      HConstants.SERVER_QUALIFIER, f));
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.SERVER_QUALIFIER, f));
     r = Result.create(kvs);
     assertNull(MetaTableAccessor.getRegionInfo(r));
     // Make a key that does not have a regioninfo value.
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
-      HConstants.REGIONINFO_QUALIFIER, f));
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, f));
     RegionInfo hri = MetaTableAccessor.getRegionInfo(Result.create(kvs));
     assertTrue(hri == null);
     // OK, give it what it expects
@@ -161,7 +159,7 @@ public class TestMetaTableAccessorNoCluster {
           RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
       kvs.add(new KeyValue(rowToVerify,
         HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
-        Bytes.toBytes(sn.getHostAndPort())));
+        Bytes.toBytes(sn.getAddress().toString())));
       kvs.add(new KeyValue(rowToVerify,
         HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
         Bytes.toBytes(sn.getStartcode())));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 7d36e99..50c9bd8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -553,7 +553,7 @@ public class TestMultiParallel {
     };
 
     NonceGenerator oldCnm =
-      ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection)connection, cnm);
+      ConnectionUtils.injectNonceGeneratorForTesting((ConnectionImplementation) connection, cnm);
 
     // First test sequential requests.
     try {
@@ -615,7 +615,8 @@ public class TestMultiParallel {
       validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L));
       table.close();
     } finally {
-      ConnectionImplementation.injectNonceGeneratorForTesting((ClusterConnection) connection, oldCnm);
+      ConnectionImplementation.injectNonceGeneratorForTesting((ConnectionImplementation) connection,
+        oldCnm);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 6616b3b..c8a7ca1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -571,7 +571,7 @@ public class TestReplicasClient {
       LOG.info("get works and is not stale done");
 
       //reset
-      ClusterConnection connection = (ClusterConnection) HTU.getConnection();
+      ConnectionImplementation connection = (ConnectionImplementation) HTU.getConnection();
       Counter hedgedReadOps = connection.getConnectionMetrics().hedgedReadOps;
       Counter hedgedReadWin = connection.getConnectionMetrics().hedgedReadWin;
       hedgedReadOps.dec(hedgedReadOps.getCount());
@@ -638,7 +638,7 @@ public class TestReplicasClient {
 
       Thread.sleep(1000 + REFRESH_PERIOD * 2);
 
-      AsyncProcess ap = ((ClusterConnection) HTU.getConnection()).getAsyncProcess();
+      AsyncProcess ap = ((ConnectionImplementation) HTU.getConnection()).getAsyncProcess();
 
       // Make primary slowdown
       SlowMeCopro.getPrimaryCdl().set(new CountDownLatch(1));
@@ -654,16 +654,14 @@ public class TestReplicasClient {
       gets.add(g);
       Object[] results = new Object[2];
 
-      int operationTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getOperationTimeout();
-      int readTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getReadRpcTimeout();
-      AsyncProcessTask task = AsyncProcessTask.newBuilder()
-              .setPool(HTable.getDefaultExecutor(HTU.getConfiguration()))
-              .setTableName(table.getName())
-              .setRowAccess(gets)
-              .setResults(results)
-              .setOperationTimeout(operationTimeout)
-              .setRpcTimeout(readTimeout)
-              .build();
+      int operationTimeout = ((ConnectionImplementation) HTU.getConnection())
+        .getConnectionConfiguration().getOperationTimeout();
+      int readTimeout = ((ConnectionImplementation) HTU.getConnection())
+        .getConnectionConfiguration().getReadRpcTimeout();
+      AsyncProcessTask task =
+        AsyncProcessTask.newBuilder().setPool(HTable.getDefaultExecutor(HTU.getConfiguration()))
+          .setTableName(table.getName()).setRowAccess(gets).setResults(results)
+          .setOperationTimeout(operationTimeout).setRpcTimeout(readTimeout).build();
       AsyncRequestFuture reqs = ap.submit(task);
       reqs.waitUntilDone();
       // verify we got the right results back
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
index b0ec37e..d7f0c87 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
@@ -206,7 +206,7 @@ public class TestSeparateClientZKCluster {
     // create table
     Connection conn = TEST_UTIL.getConnection();
     Admin admin = conn.getAdmin();
-    HTable table = (HTable) conn.getTable(tn);
+    Table table = conn.getTable(tn);
     try {
       ColumnFamilyDescriptorBuilder cfDescBuilder =
           ColumnFamilyDescriptorBuilder.newBuilder(family);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
index beaa59b..f743388 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
@@ -75,7 +75,7 @@ public class TestShortCircuitConnection {
     htd.addFamily(hcd);
     UTIL.createTable(htd, null);
     HRegionServer regionServer = UTIL.getRSForFirstRegionInTable(tableName);
-    ClusterConnection connection = regionServer.getClusterConnection();
+    ConnectionImplementation connection = (ConnectionImplementation) regionServer.getConnection();
     Table tableIf = connection.getTable(tableName);
     assertTrue(tableIf instanceof HTable);
     HTable table = (HTable) tableIf;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 6aa0d5a..7cfec57 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
@@ -163,7 +162,7 @@ public class MockNoopMasterServices implements MasterServices {
   }
 
   @Override
-  public ClusterConnection getConnection() {
+  public Connection getConnection() {
     return null;
   }
 
@@ -355,11 +354,6 @@ public class MockNoopMasterServices implements MasterServices {
   }
 
   @Override
-  public ClusterConnection getClusterConnection() {
-    return null;
-  }
-
-  @Override
   public LoadBalancer getLoadBalancer() {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index d7a46eb..211efc0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -305,7 +304,7 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
   }
 
   @Override
-  public ClusterConnection getConnection() {
+  public Connection getConnection() {
     return null;
   }
 
@@ -621,11 +620,6 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
   }
 
   @Override
-  public ClusterConnection getClusterConnection() {
-    return null;
-  }
-
-  @Override
   public ThroughputController getFlushThroughputController() {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 77667a7..7f9605e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -313,7 +312,7 @@ public class TestActiveMasterManager {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -331,12 +330,6 @@ public class TestActiveMasterManager {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
index a0aae32..0deea15 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
@@ -18,16 +18,12 @@
 package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 import java.net.InetAddress;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;
@@ -51,14 +47,7 @@ public class TestClockSkewDetection {
   @Test
   public void testClockSkewDetection() throws Exception {
     final Configuration conf = HBaseConfiguration.create();
-    ServerManager sm = new ServerManager(new MockNoopMasterServices(conf) {
-      @Override
-      public ClusterConnection getClusterConnection() {
-        ClusterConnection conn = mock(ClusterConnection.class);
-        when(conn.getRpcControllerFactory()).thenReturn(mock(RpcControllerFactory.class));
-        return conn;
-      }
-    });
+    ServerManager sm = new ServerManager(new MockNoopMasterServices(conf));
 
     LOG.debug("regionServerStartup 1");
     InetAddress ia1 = InetAddress.getLocalHost();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index e73ba75..4f03108 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -184,7 +184,7 @@ public class TestMasterNoCluster {
     // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
      // the conf from the master; the conf will already have a ClusterConnection
      // associated, so the below mocking of a connection will fail.
-    final ClusterConnection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate(
+    final Connection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate(
         TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(),
         HRegionInfo.FIRST_META_REGIONINFO);
     HMaster master = new HMaster(conf) {
@@ -212,12 +212,7 @@ public class TestMasterNoCluster {
       }
 
       @Override
-      public ClusterConnection getConnection() {
-        return mockedConnection;
-      }
-
-      @Override
-      public ClusterConnection getClusterConnection() {
+      public Connection getConnection() {
         return mockedConnection;
       }
     };
@@ -281,7 +276,7 @@ public class TestMasterNoCluster {
       }
 
       @Override
-      public ClusterConnection getConnection() {
+      public Connection getConnection() {
         // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
         // the conf from the master; the conf will already have a Connection
        // associated, so the below mocking of a connection will fail.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 56467cc..ef64c94 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -87,7 +87,7 @@ public class MockMasterServices extends MockNoopMasterServices {
   private MasterProcedureEnv procedureEnv;
   private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
   private ProcedureStore procedureStore;
-  private final ClusterConnection connection;
+  private final Connection connection;
   private final LoadBalancer balancer;
   private final ServerManager serverManager;
 
@@ -284,7 +284,7 @@ public class MockMasterServices extends MockNoopMasterServices {
   }
 
   @Override
-  public ClusterConnection getConnection() {
+  public Connection getConnection() {
     return this.connection;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index c5fad32..3d6466d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -228,7 +227,7 @@ public class TestHFileCleaner {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -261,12 +260,6 @@ public class TestHFileCleaner {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index fd11ff8..82c8684 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -164,7 +163,7 @@ public class TestHFileLinkCleaner {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -193,13 +192,6 @@ public class TestHFileLinkCleaner {
     public ChoreService getChoreService() {
       return null;
     }
-
-    @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
     @Override
     public FileSystem getFileSystem() {
       return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index 6a5fe9c..7434b88 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -363,7 +362,7 @@ public class TestLogsCleaner {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -394,11 +393,6 @@ public class TestLogsCleaner {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index 9791643..b16d377 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -252,7 +251,7 @@ public class TestReplicationHFileCleaner {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -285,12 +284,6 @@ public class TestReplicationHFileCleaner {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index b4d16c6..29e6dbb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -359,11 +359,11 @@ public class MasterProcedureTestingUtility {
   //  Procedure Helpers
   // ==========================================================================
   public static long generateNonceGroup(final HMaster master) {
-    return master.getClusterConnection().getNonceGenerator().getNonceGroup();
+    return master.getAsyncClusterConnection().getNonceGenerator().getNonceGroup();
   }
 
   public static long generateNonce(final HMaster master) {
-    return master.getClusterConnection().getNonceGenerator().newNonce();
+    return master.getAsyncClusterConnection().getNonceGenerator().newNonce();
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index fd02cf4..af8cfb8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClientServiceCallable;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RpcRetryingCaller;
@@ -66,7 +66,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener;
@@ -89,10 +88,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-
 /**
  * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of
  * the region server's bulkLoad functionality.
@@ -214,29 +209,17 @@ public class TestHRegionServerBulkLoad {
       }
       // bulk load HFiles
       BulkLoadHFiles.create(UTIL.getConfiguration()).bulkLoad(tableName, family2Files);
+      final Connection conn = UTIL.getConnection();
       // Periodically do compaction to reduce the number of open file handles.
       if (numBulkLoads.get() % 5 == 0) {
         RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf);
         RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
         // 5 * 50 = 250 open file handles!
-        ClientServiceCallable<Void> callable =
-          new ClientServiceCallable<Void>(UTIL.getConnection(), tableName, Bytes.toBytes("aaa"),
-            new RpcControllerFactory(UTIL.getConfiguration()).newController(),
-            HConstants.PRIORITY_UNSET) {
-            @Override
-            protected Void rpcCall() throws Exception {
-              LOG.debug(
-                "compacting " + getLocation() + " for row " + Bytes.toStringBinary(getRow()));
-              AdminProtos.AdminService.BlockingInterface server =
-                ((ClusterConnection) UTIL.getConnection()).getAdmin(getLocation().getServerName());
-              CompactRegionRequest request = RequestConverter.buildCompactRegionRequest(
-                getLocation().getRegionInfo().getRegionName(), true, null);
-              server.compactRegion(null, request);
-              numCompactions.incrementAndGet();
-              return null;
-            }
-          };
-        caller.callWithRetries(callable, Integer.MAX_VALUE);
+        try (RegionLocator locator = conn.getRegionLocator(tableName)) {
+          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true);
+          conn.getAdmin().compactRegion(loc.getRegion().getRegionName());
+          numCompactions.incrementAndGet();
+        }
       }
     }
   }
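
The hunk above replaces the hand-rolled ClientServiceCallable and raw AdminProtos compactRegion RPC with the public RegionLocator and Admin APIs. A minimal sketch of the resulting pattern, exception handling elided; the completion poll at the end is illustrative only and not part of this patch (CompactionState is the public org.apache.hadoop.hbase.client type):

  // Sketch of the pattern introduced above; names follow the test.
  Connection conn = UTIL.getConnection();
  try (RegionLocator locator = conn.getRegionLocator(tableName);
      Admin admin = conn.getAdmin()) {
    HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true);
    byte[] regionName = loc.getRegion().getRegionName();
    admin.compactRegion(regionName);
    // Optional (not in the patch): wait for the compaction to drain before the
    // next bulk load round.
    while (admin.getCompactionStateForRegion(regionName) != CompactionState.NONE) {
      Thread.sleep(100);
    }
  }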
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 4a359e4..7c6598d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
@@ -829,7 +828,7 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -844,12 +843,6 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index 43da846..b52bf19 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -132,7 +131,7 @@ public class TestSplitLogWorker {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -142,12 +141,6 @@ public class TestSplitLogWorker {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index 9e9d1d6..82b2843 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
@@ -470,7 +469,7 @@ public class TestWALLockup {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -506,11 +505,6 @@ public class TestWALLockup {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 62ab265..9d3283d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
@@ -209,7 +208,7 @@ public class TestReplicationTrackerZKImpl {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -245,12 +244,6 @@ public class TestReplicationTrackerZKImpl {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 427f319..3a1320c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -852,8 +851,9 @@ public abstract class TestReplicationSourceManager {
     public CoordinatedStateManager getCoordinatedStateManager() {
       return null;
     }
+
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -888,12 +888,6 @@ public abstract class TestReplicationSourceManager {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
     public FileSystem getFileSystem() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 92c8e54..a2981fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
@@ -211,7 +210,7 @@ public class TestTokenAuthentication {
     }
 
     @Override
-    public ClusterConnection getConnection() {
+    public Connection getConnection() {
       return null;
     }
 
@@ -355,12 +354,6 @@ public class TestTokenAuthentication {
     }
 
     @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
     public Connection createConnection(Configuration conf) throws IOException {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index d25ccef..9e29763 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
@@ -74,9 +73,6 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-
 /**
  * This is the base class for  HBaseFsck's ability to detect reasons for inconsistent tables.
  *
@@ -98,7 +94,7 @@ public class BaseTestHBaseFsck {
   protected static RegionStates regionStates;
   protected static ExecutorService tableExecutorService;
   protected static ScheduledThreadPoolExecutor hbfsckExecutorService;
-  protected static ClusterConnection connection;
+  protected static Connection connection;
   protected static Admin admin;
 
   // for the instance, reset every test run
@@ -298,9 +294,6 @@ public class BaseTestHBaseFsck {
 
   /**
    * delete table in preparation for next test
-   *
-   * @param tablename
-   * @throws IOException
    */
   void cleanupTable(TableName tablename) throws Exception {
     if (tbl != null) {
@@ -319,10 +312,8 @@ public class BaseTestHBaseFsck {
     Collection<ServerName> regionServers = status.getLiveServerMetrics().keySet();
     Map<ServerName, List<String>> mm = new HashMap<>();
     for (ServerName hsi : regionServers) {
-      AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi);
-
       // list all online regions from this region server
-      List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
+      List<RegionInfo> regions = admin.getRegions(hsi);
       List<String> regionNames = new ArrayList<>(regions.size());
       for (RegionInfo hri : regions) {
         regionNames.add(hri.getRegionNameAsString());
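
As with the other test changes in this series, the hunk above swaps a private AdminProtos RPC (ProtobufUtil.getOnlineRegions over ClusterConnection.getAdmin) for the public Admin API. A minimal sketch of listing online regions per region server this way; names mirror the test and error handling is elided:

  // Sketch only: enumerate online regions per server through the public Admin API.
  Map<ServerName, List<String>> regionsByServer = new HashMap<>();
  for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
    List<String> names = new ArrayList<>();
    for (RegionInfo ri : admin.getRegions(sn)) {
      names.add(ri.getRegionNameAsString());
    }
    regionsByServer.put(sn, names);
  }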
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 13212d2..380c1c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -104,7 +103,7 @@ public class MockServer implements Server {
   }
 
   @Override
-  public ClusterConnection getConnection() {
+  public Connection getConnection() {
     return null;
   }
 
@@ -115,7 +114,6 @@ public class MockServer implements Server {
 
   @Override
   public boolean isAborted() {
-    // TODO Auto-generated method stub
     return this.aborted;
   }
 
@@ -125,12 +123,6 @@ public class MockServer implements Server {
   }
 
   @Override
-  public ClusterConnection getClusterConnection() {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
   public FileSystem getFileSystem() {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 0a66ec0..f245384 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -56,7 +56,7 @@ public abstract class MultiThreadedAction {
 
   protected final TableName tableName;
   protected final Configuration conf;
-  protected final ClusterConnection connection; // all reader / writer threads will share this connection
+  protected final Connection connection; // all reader / writer threads will share this connection
 
   protected int numThreads = 1;
 
@@ -151,7 +151,7 @@ public abstract class MultiThreadedAction {
     this.dataGenerator = dataGen;
     this.tableName = tableName;
     this.actionLetter = actionLetter;
-    this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
+    this.connection = ConnectionFactory.createConnection(conf);
   }
 
   public void start(long startKey, long endKey, int numThreads) throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
index 3686150..09ae96f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.io.hfile.TestHFile;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -74,7 +73,7 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck {
         TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
     regionStates = assignmentManager.getRegionStates();
 
-    connection = (ClusterConnection) TEST_UTIL.getConnection();
+    connection = TEST_UTIL.getConnection();
 
     admin = connection.getAdmin();
     admin.balancerSwitch(false, true);
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
index 36e513c..abaaba0 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
@@ -29,9 +29,7 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
-
 import javax.net.ssl.SSLException;
-
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index d947a86..2b3a80a 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -41,11 +41,11 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -144,7 +144,7 @@ public class TestThriftConnection {
 
   private static Connection createConnection(int port, boolean useHttp) throws IOException {
     Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
-    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
+    conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
         ThriftConnection.class.getName());
     if (useHttp) {
       conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,


[hbase] 10/27: HBASE-21779 Reimplement BulkLoadHFilesTool to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit cba47b4b884c5b378ee461ee831ec9310e7cfaf7
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Sat Feb 2 17:36:41 2019 +0800

    HBASE-21779 Reimplement BulkLoadHFilesTool to use AsyncClusterConnection
---
 .../backup/mapreduce/MapReduceRestoreJob.java      |   18 +-
 .../hadoop/hbase/backup/util/BackupUtils.java      |   17 +-
 .../hadoop/hbase/backup/util/RestoreTool.java      |    9 +-
 .../backup/TestIncrementalBackupWithBulkLoad.java  |    6 +-
 .../hadoop/hbase/client/ConnectionUtils.java       |   41 +
 .../hadoop/hbase/client/RawAsyncTableImpl.java     |   46 +-
 .../hadoop/hbase/client/SecureBulkLoadClient.java  |  150 ---
 ...estReplicationSyncUpToolWithBulkLoadedData.java |    6 +-
 .../hbase/mapreduce/IntegrationTestBulkLoad.java   |   26 +-
 .../hbase/mapreduce/IntegrationTestImportTsv.java  |    9 +-
 .../apache/hadoop/hbase/mapreduce/CopyTable.java   |   27 +-
 .../hadoop/hbase/mapreduce/HRegionPartitioner.java |    2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    |   24 +-
 .../hbase/client/AsyncClusterConnection.java       |   19 +
 .../hbase/client/AsyncClusterConnectionImpl.java   |   55 +
 .../hbase/client/ClusterConnectionFactory.java     |   16 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java     |   77 --
 .../mob/compactions/PartitionedMobCompactor.java   |   19 +-
 .../replication/regionserver/HFileReplicator.java  |  125 +-
 .../replication/regionserver/ReplicationSink.java  |   43 +-
 .../regionserver/WALEntrySinkFilter.java           |    8 +-
 .../hadoop/hbase/tool/BulkLoadHFilesTool.java      |  998 ++++++++++++++-
 .../hadoop/hbase/tool/LoadIncrementalHFiles.java   | 1285 --------------------
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |   11 +-
 .../hbase/client/DummyAsyncClusterConnection.java  |  155 +++
 .../hadoop/hbase/client/DummyAsyncRegistry.java    |   60 +
 .../hadoop/hbase/client/DummyAsyncTable.java       |  159 +++
 .../hbase/client/TestReplicaWithCluster.java       |   40 +-
 .../coprocessor/TestRegionObserverInterface.java   |    4 +-
 .../hbase/quotas/SpaceQuotaHelperForTests.java     |   45 +-
 .../hbase/quotas/TestLowLatencySpaceQuotas.java    |   17 +-
 .../hadoop/hbase/quotas/TestSpaceQuotas.java       |   30 +-
 .../regionserver/TestHRegionServerBulkLoad.java    |   69 +-
 .../regionserver/TestScannerWithBulkload.java      |   19 +-
 .../regionserver/TestSecureBulkLoadManager.java    |   27 +-
 .../hbase/replication/TestMasterReplication.java   |    6 +-
 .../regionserver/TestReplicationSink.java          |    6 +-
 .../regionserver/TestWALEntrySinkFilter.java       |  429 +------
 .../security/access/TestAccessController.java      |   12 +-
 ...rementalHFiles.java => TestBulkLoadHFiles.java} |  157 ++-
 .../tool/TestBulkLoadHFilesSplitRecovery.java      |  486 ++++++++
 .../TestLoadIncrementalHFilesSplitRecovery.java    |  630 ----------
 ...alHFiles.java => TestSecureBulkLoadHFiles.java} |   11 +-
 ... => TestSecureBulkLoadHFilesSplitRecovery.java} |    9 +-
 44 files changed, 2363 insertions(+), 3045 deletions(-)
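
Before the per-file diffs, a short orientation: this commit folds the old LoadIncrementalHFiles tool into BulkLoadHFilesTool and standardizes callers on the BulkLoadHFiles interface, whose implementation now runs over AsyncClusterConnection. A minimal sketch of driving a bulk load through that interface (the table name and output directory are illustrative, not taken from the patch; the surrounding method is assumed to declare IOException):

  // Sketch only: the interface-based entry point the callers below switch to.
  Configuration conf = HBaseConfiguration.create();
  // Relax the per-family file limit, as BackupUtils.createLoader does below.
  conf.setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
  BulkLoadHFiles loader = BulkLoadHFiles.create(conf);
  if (loader.bulkLoad(TableName.valueOf("restored_table"), new Path("/tmp/bulk-output")).isEmpty()) {
    throw new IOException("no HFiles were loaded from /tmp/bulk-output");
  }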

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
index 1256289..9daa282 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
@@ -17,11 +17,9 @@
  */
 package org.apache.hadoop.hbase.backup.mapreduce;
 
-import static org.apache.hadoop.hbase.backup.util.BackupUtils.failed;
 import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
 
 import java.io.IOException;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -29,7 +27,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.RestoreJob;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.util.Tool;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -42,8 +40,7 @@ import org.slf4j.LoggerFactory;
  * HFiles which are aligned with the region boundaries of the table being
  * restored.
  *
- * The resulting HFiles then are loaded using HBase bulk load tool
- * {@link LoadIncrementalHFiles}
+ * The resulting HFiles then are loaded using HBase bulk load tool {@link BulkLoadHFiles}.
  */
 @InterfaceAudience.Private
 public class MapReduceRestoreJob implements RestoreJob {
@@ -88,23 +85,20 @@ public class MapReduceRestoreJob implements RestoreJob {
       };
 
       int result;
-      int loaderResult;
       try {
 
         player.setConf(getConf());
         result = player.run(playerArgs);
         if (succeeded(result)) {
           // do bulk load
-          LoadIncrementalHFiles loader = BackupUtils.createLoader(getConf());
+          BulkLoadHFiles loader = BackupUtils.createLoader(getConf());
           if (LOG.isDebugEnabled()) {
             LOG.debug("Restoring HFiles from directory " + bulkOutputPath);
           }
-          String[] args = { bulkOutputPath.toString(), newTableNames[i].getNameAsString() };
-          loaderResult = loader.run(args);
 
-          if (failed(loaderResult)) {
-            throw new IOException("Can not restore from backup directory " + dirs
-                + " (check Hadoop and HBase logs). Bulk loader return code =" + loaderResult);
+          if (loader.bulkLoad(newTableNames[i], bulkOutputPath).isEmpty()) {
+            throw new IOException("Can not restore from backup directory " + dirs +
+              " (check Hadoop and HBase logs). Bulk loader returns null");
           }
         } else {
           throw new IOException("Can not restore from backup directory " + dirs
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
index af8b954..fe2a977 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
@@ -30,7 +30,6 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.TreeMap;
 import java.util.TreeSet;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -54,7 +53,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -722,7 +721,7 @@ public final class BackupUtils {
     return result == 0;
   }
 
-  public static LoadIncrementalHFiles createLoader(Configuration config) throws IOException {
+  public static BulkLoadHFiles createLoader(Configuration config) {
     // set configuration for restore:
     // LoadIncrementalHFile needs more time
     // <name>hbase.rpc.timeout</name> <value>600000</value>
@@ -732,15 +731,9 @@ public final class BackupUtils {
 
     // By default, it is 32 and loader will fail if # of files in any region exceed this
     // limit. Bad for snapshot restore.
-    conf.setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
-    conf.set(LoadIncrementalHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
-    LoadIncrementalHFiles loader;
-    try {
-      loader = new LoadIncrementalHFiles(conf);
-    } catch (Exception e) {
-      throw new IOException(e);
-    }
-    return loader;
+    conf.setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
+    conf.set(BulkLoadHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
+    return BulkLoadHFiles.create(conf);
   }
 
   public static String findMostRecentBackupId(String[] backupIds) {
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index 13b183d..92254fa 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -25,7 +25,6 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.TreeMap;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -46,7 +45,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -451,12 +450,12 @@ public class RestoreTool {
         }
       }
     }
-    return LoadIncrementalHFiles.inferBoundaries(map);
+    return BulkLoadHFilesTool.inferBoundaries(map);
   }
 
   /**
-   * Prepare the table for bulkload, most codes copied from
-   * {@link LoadIncrementalHFiles#createTable(TableName, String, Admin)}
+   * Prepare the table for bulkload, most codes copied from {@code createTable} method in
+   * {@code BulkLoadHFilesTool}.
    * @param conn connection
    * @param tableBackupPath path
    * @param tableName table name
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index 74dd569..82f0fb7 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.tool.TestLoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.TestBulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.Assert;
@@ -92,7 +92,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
 
     int NB_ROWS2 = 20;
     LOG.debug("bulk loading into " + testName);
-    int actual = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
+    int actual = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
         qualName, false, null, new byte[][][] {
           new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
           new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
@@ -105,7 +105,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdIncMultiple));
     // #4 bulk load again
     LOG.debug("bulk loading into " + testName);
-    int actual1 = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
+    int actual1 = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
       qualName, false, null,
       new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") },
         new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, },
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 4a2fa3a..4ec7e32 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
@@ -59,6 +60,7 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 import org.apache.hbase.thirdparty.io.netty.util.Timer;
 
@@ -695,4 +697,43 @@ public final class ConnectionUtils {
         metrics -> ResultStatsUtil.updateStats(metrics, serverName, regionName, regionLoadStats));
     });
   }
+
+  @FunctionalInterface
+  interface Converter<D, I, S> {
+    D convert(I info, S src) throws IOException;
+  }
+
+  @FunctionalInterface
+  interface RpcCall<RESP, REQ> {
+    void call(ClientService.Interface stub, HBaseRpcController controller, REQ req,
+        RpcCallback<RESP> done);
+  }
+
+  static <REQ, PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
+      HRegionLocation loc, ClientService.Interface stub, REQ req,
+      Converter<PREQ, byte[], REQ> reqConvert, RpcCall<PRESP, PREQ> rpcCall,
+      Converter<RESP, HBaseRpcController, PRESP> respConverter) {
+    CompletableFuture<RESP> future = new CompletableFuture<>();
+    try {
+      rpcCall.call(stub, controller, reqConvert.convert(loc.getRegion().getRegionName(), req),
+        new RpcCallback<PRESP>() {
+
+          @Override
+          public void run(PRESP resp) {
+            if (controller.failed()) {
+              future.completeExceptionally(controller.getFailed());
+            } else {
+              try {
+                future.complete(respConverter.convert(controller, resp));
+              } catch (IOException e) {
+                future.completeExceptionally(e);
+              }
+            }
+          }
+        });
+    } catch (IOException e) {
+      future.completeExceptionally(e);
+    }
+    return future;
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
index 8050137..c357b1f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder;
+import org.apache.hadoop.hbase.client.ConnectionUtils.Converter;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
@@ -156,51 +157,12 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
     return conn.getRegionLocator(tableName);
   }
 
-  @FunctionalInterface
-  private interface Converter<D, I, S> {
-    D convert(I info, S src) throws IOException;
-  }
-
-  @FunctionalInterface
-  private interface RpcCall<RESP, REQ> {
-    void call(ClientService.Interface stub, HBaseRpcController controller, REQ req,
-        RpcCallback<RESP> done);
-  }
-
-  private static <REQ, PREQ, PRESP, RESP> CompletableFuture<RESP> call(
-      HBaseRpcController controller, HRegionLocation loc, ClientService.Interface stub, REQ req,
-      Converter<PREQ, byte[], REQ> reqConvert, RpcCall<PRESP, PREQ> rpcCall,
-      Converter<RESP, HBaseRpcController, PRESP> respConverter) {
-    CompletableFuture<RESP> future = new CompletableFuture<>();
-    try {
-      rpcCall.call(stub, controller, reqConvert.convert(loc.getRegion().getRegionName(), req),
-        new RpcCallback<PRESP>() {
-
-          @Override
-          public void run(PRESP resp) {
-            if (controller.failed()) {
-              future.completeExceptionally(controller.getFailed());
-            } else {
-              try {
-                future.complete(respConverter.convert(controller, resp));
-              } catch (IOException e) {
-                future.completeExceptionally(e);
-              }
-            }
-          }
-        });
-    } catch (IOException e) {
-      future.completeExceptionally(e);
-    }
-    return future;
-  }
-
   private static <REQ, RESP> CompletableFuture<RESP> mutate(HBaseRpcController controller,
       HRegionLocation loc, ClientService.Interface stub, REQ req,
       Converter<MutateRequest, byte[], REQ> reqConvert,
       Converter<RESP, HBaseRpcController, MutateResponse> respConverter) {
-    return call(controller, loc, stub, req, reqConvert, (s, c, r, done) -> s.mutate(c, r, done),
-      respConverter);
+    return ConnectionUtils.call(controller, loc, stub, req, reqConvert,
+      (s, c, r, done) -> s.mutate(c, r, done), respConverter);
   }
 
   private static <REQ> CompletableFuture<Void> voidMutate(HBaseRpcController controller,
@@ -247,7 +209,7 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
 
   private CompletableFuture<Result> get(Get get, int replicaId) {
     return this.<Result, Get> newCaller(get, readRpcTimeoutNs)
-      .action((controller, loc, stub) -> RawAsyncTableImpl
+      .action((controller, loc, stub) -> ConnectionUtils
         .<Get, GetRequest, GetResponse, Result> call(controller, loc, stub, get,
           RequestConverter::buildGetRequest, (s, c, req, done) -> s.get(c, req, done),
           (c, resp) -> ProtobufUtil.toResult(resp.getResult(), c.cellScanner())))
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
deleted file mode 100644
index 2186271..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.security.token.Token;
-
-import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET;
-
-/**
- * Client proxy for SecureBulkLoadProtocol
- */
-@InterfaceAudience.Private
-public class SecureBulkLoadClient {
-  private Table table;
-  private final RpcControllerFactory rpcControllerFactory;
-
-  public SecureBulkLoadClient(final Configuration conf, Table table) {
-    this.table = table;
-    this.rpcControllerFactory = new RpcControllerFactory(conf);
-  }
-
-  public String prepareBulkLoad(final Connection conn) throws IOException {
-    try {
-      ClientServiceCallable<String> callable = new ClientServiceCallable<String>(conn,
-          table.getName(), HConstants.EMPTY_START_ROW,
-          this.rpcControllerFactory.newController(), PRIORITY_UNSET) {
-        @Override
-        protected String rpcCall() throws Exception {
-          byte[] regionName = getLocation().getRegionInfo().getRegionName();
-          RegionSpecifier region =
-              RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
-          PrepareBulkLoadRequest request = PrepareBulkLoadRequest.newBuilder()
-              .setTableName(ProtobufUtil.toProtoTableName(table.getName()))
-              .setRegion(region).build();
-          PrepareBulkLoadResponse response = getStub().prepareBulkLoad(null, request);
-          return response.getBulkToken();
-        }
-      };
-      return RpcRetryingCallerFactory.instantiate(conn.getConfiguration(), null)
-          .<String> newCaller().callWithRetries(callable, Integer.MAX_VALUE);
-    } catch (Throwable throwable) {
-      throw new IOException(throwable);
-    }
-  }
-
-  public void cleanupBulkLoad(final Connection conn, final String bulkToken) throws IOException {
-    try {
-      ClientServiceCallable<Void> callable = new ClientServiceCallable<Void>(conn,
-          table.getName(), HConstants.EMPTY_START_ROW, this.rpcControllerFactory.newController(), PRIORITY_UNSET) {
-        @Override
-        protected Void rpcCall() throws Exception {
-          byte[] regionName = getLocation().getRegionInfo().getRegionName();
-          RegionSpecifier region = RequestConverter.buildRegionSpecifier(
-              RegionSpecifierType.REGION_NAME, regionName);
-          CleanupBulkLoadRequest request =
-              CleanupBulkLoadRequest.newBuilder().setRegion(region).setBulkToken(bulkToken).build();
-          getStub().cleanupBulkLoad(null, request);
-          return null;
-        }
-      };
-      RpcRetryingCallerFactory.instantiate(conn.getConfiguration(), null)
-          .<Void> newCaller().callWithRetries(callable, Integer.MAX_VALUE);
-    } catch (Throwable throwable) {
-      throw new IOException(throwable);
-    }
-  }
-
-  /**
-   * Securely bulk load a list of HFiles using client protocol.
-   *
-   * @param client
-   * @param familyPaths
-   * @param regionName
-   * @param assignSeqNum
-   * @param userToken
-   * @param bulkToken
-   * @return true if all are loaded
-   * @throws IOException
-   */
-  public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client,
-      final List<Pair<byte[], String>> familyPaths,
-      final byte[] regionName, boolean assignSeqNum,
-      final Token<?> userToken, final String bulkToken) throws IOException {
-    return secureBulkLoadHFiles(client, familyPaths, regionName, assignSeqNum, userToken, bulkToken,
-        false);
-  }
-
-  /**
-   * Securely bulk load a list of HFiles using client protocol.
-   *
-   * @param client
-   * @param familyPaths
-   * @param regionName
-   * @param assignSeqNum
-   * @param userToken
-   * @param bulkToken
-   * @param copyFiles
-   * @return true if all are loaded
-   * @throws IOException
-   */
-  public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client,
-      final List<Pair<byte[], String>> familyPaths,
-      final byte[] regionName, boolean assignSeqNum,
-      final Token<?> userToken, final String bulkToken, boolean copyFiles) throws IOException {
-    BulkLoadHFileRequest request =
-        RequestConverter.buildBulkLoadHFileRequest(familyPaths, regionName, assignSeqNum,
-          userToken, bulkToken, copyFiles);
-
-    try {
-      BulkLoadHFileResponse response = client.bulkLoadHFile(null, request);
-      return response.getLoaded();
-    } catch (Exception se) {
-      throw ProtobufUtil.handleRemoteException(se);
-    }
-  }
-}
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
index eb575c5..3e823c3 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.replication.regionserver.TestSourceFSConfigurationProvider;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.junit.BeforeClass;
@@ -218,9 +218,7 @@ public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplication
     }
 
     final TableName tableName = source.getName();
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(utility1.getConfiguration());
-    String[] args = { dir.toString(), tableName.toString() };
-    loader.run(args);
+    BulkLoadHFiles.create(utility1.getConfiguration()).bulkLoad(tableName, dir);
   }
 
   private void wait(Table target, int expectedCount, String msg) throws IOException,
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index a28c9f6..79dfe6c 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -60,7 +59,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.RegionSplitter;
@@ -86,6 +85,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
@@ -292,24 +292,18 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
 
     // Set where to place the hfiles.
     FileOutputFormat.setOutputPath(job, p);
-    try (Connection conn = ConnectionFactory.createConnection(conf);
-        Admin admin = conn.getAdmin();
-        Table table = conn.getTable(getTablename());
-        RegionLocator regionLocator = conn.getRegionLocator(getTablename())) {
-
+    try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin();
+      RegionLocator regionLocator = conn.getRegionLocator(getTablename())) {
       // Configure the partitioner and other things needed for HFileOutputFormat.
-      HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
-
+      HFileOutputFormat2.configureIncrementalLoad(job, admin.getDescriptor(getTablename()),
+        regionLocator);
       // Run the job making sure it works.
       assertEquals(true, job.waitForCompletion(true));
-
-      // Create a new loader.
-      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
-
-      // Load the HFiles in.
-      loader.doBulkLoad(p, admin, table, regionLocator);
     }
-
+    // Create a new loader.
+    BulkLoadHFiles loader = BulkLoadHFiles.create(conf);
+    // Load the HFiles in.
+    loader.bulkLoad(getTablename(), p);
     // Delete the files.
     util.getTestFileSystem().delete(p, true);
   }
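
For reference, a minimal sketch of the new call that replaces the old LoadIncrementalHFiles.doBulkLoad(...) usage above, assuming a running cluster, an existing target table and a directory of HFiles written by HFileOutputFormat2; the table and directory names below are placeholders, not taken from this patch:

    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("example_table");   // placeholder table name
    Path hfileDir = new Path("/tmp/example-hfiles");            // placeholder HFile output dir
    // BulkLoadHFiles resolves the Admin/RegionLocator internally, so the caller no longer
    // needs to open and close Table or RegionLocator handles.
    Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> loaded =
        BulkLoadHFiles.create(conf).bulkLoad(tableName, hfileDir);
    if (loaded.isEmpty()) {
      throw new IOException("Bulk load did not load any HFiles into " + tableName);
    }
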
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
index ab5f2bb..c80d61c 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
@@ -29,7 +29,6 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,7 +44,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.hadoop.util.Tool;
@@ -60,7 +59,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Validate ImportTsv + LoadIncrementalHFiles on a distributed cluster.
+ * Validate ImportTsv + BulkLoadHFiles on a distributed cluster.
  */
 @Category(IntegrationTests.class)
 public class IntegrationTestImportTsv extends Configured implements Tool {
@@ -141,8 +140,8 @@ public class IntegrationTestImportTsv extends Configured implements Tool {
 
     String[] args = { hfiles.toString(), tableName.getNameAsString() };
     LOG.info(format("Running LoadIncrememntalHFiles with args: %s", Arrays.asList(args)));
-    assertEquals("Loading HFiles failed.",
-      0, ToolRunner.run(new LoadIncrementalHFiles(new Configuration(getConf())), args));
+    assertEquals("Loading HFiles failed.", 0,
+      ToolRunner.run(new BulkLoadHFilesTool(getConf()), args));
 
     Table table = null;
     Scan scan = new Scan() {{
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index b59c9e6..a443b4b 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -22,28 +22,28 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
-
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.mapreduce.Import.CellImporter;
-import org.apache.hadoop.hbase.mapreduce.Import.Importer;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.mapreduce.Import.CellImporter;
+import org.apache.hadoop.hbase.mapreduce.Import.Importer;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tool used to copy a table to another one which can be on a different setup.
@@ -416,13 +416,12 @@ public class CopyTable extends Configured implements Tool {
     int code = 0;
     if (bulkload) {
       LOG.info("Trying to bulk load data to destination table: " + dstTableName);
-      LOG.info("command: ./bin/hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles {} {}",
+      LOG.info("command: ./bin/hbase {} {} {}", BulkLoadHFilesTool.NAME,
         this.bulkloadDir.toString(), this.dstTableName);
-      code = new LoadIncrementalHFiles(this.getConf())
-          .run(new String[] { this.bulkloadDir.toString(), this.dstTableName });
-      if (code == 0) {
-        // bulkloadDir is deleted only LoadIncrementalHFiles was successful so that one can rerun
-        // LoadIncrementalHFiles.
+      if (!BulkLoadHFiles.create(getConf()).bulkLoad(TableName.valueOf(dstTableName), bulkloadDir)
+        .isEmpty()) {
+        // bulkloadDir is deleted only if BulkLoadHFiles was successful, so that one can rerun
+        // BulkLoadHFiles.
         FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
         if (!fs.delete(this.bulkloadDir, true)) {
           LOG.error("Deleting folder " + bulkloadDir + " failed!");
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
index b48ecf0..62fc06d 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.mapreduce.Partitioner;
  *
  * <p>This class is not suitable as partitioner creating hfiles
  * for incremental bulk loads as region spread will likely change between time of
- * hfile creation and load time. See {@link org.apache.hadoop.hbase.tool.LoadIncrementalHFiles}
+ * hfile creation and load time. See {@link org.apache.hadoop.hbase.tool.BulkLoadHFiles}
  * and <a href="http://hbase.apache.org/book.html#arch.bulk.load">Bulk Load</a>.</p>
  *
  * @param <KEY>  The type of the key.
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 5c0bb2b..c9f5a2e 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -90,7 +90,7 @@ import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
@@ -707,18 +707,17 @@ public class TestHFileOutputFormat2  {
         }
         Table currentTable = allTables.get(tableNameStr);
         TableName currentTableName = currentTable.getName();
-        new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, singleTableInfo
-                .getRegionLocator());
+        BulkLoadHFiles.create(conf).bulkLoad(currentTableName, tableDir);
 
         // Ensure data shows up
         int expectedRows = 0;
         if (putSortReducer) {
           // no rows should be extracted
-          assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
+          assertEquals("BulkLoadHFiles should put expected data in table", expectedRows,
                   util.countRows(currentTable));
         } else {
           expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
-          assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
+          assertEquals("BulkLoadHFiles should put expected data in table", expectedRows,
                   util.countRows(currentTable));
           Scan scan = new Scan();
           ResultScanner results = currentTable.getScanner(scan);
@@ -1248,14 +1247,14 @@ public class TestHFileOutputFormat2  {
       for (int i = 0; i < 2; i++) {
         Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
         runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table
-                .getTableDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false);
+                .getDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false);
         // Perform the actual load
-        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
+        BulkLoadHFiles.create(conf).bulkLoad(table.getName(), testDir);
       }
 
       // Ensure data shows up
       int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
-      assertEquals("LoadIncrementalHFiles should put expected data in table",
+      assertEquals("BulkLoadHFiles should put expected data in table",
           expectedRows, util.countRows(table));
 
       // should have a second StoreFile now
@@ -1340,15 +1339,16 @@ public class TestHFileOutputFormat2  {
           true);
 
       RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]);
-      runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table
-                      .getTableDescriptor(), regionLocator)), testDir, false);
+      runIncrementalPELoad(conf,
+        Arrays.asList(new HFileOutputFormat2.TableInfo(table.getDescriptor(), regionLocator)),
+        testDir, false);
 
       // Perform the actual load
-      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator);
+      BulkLoadHFiles.create(conf).bulkLoad(table.getName(), testDir);
 
       // Ensure data shows up
       int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
-      assertEquals("LoadIncrementalHFiles should put expected data in table",
+      assertEquals("BulkLoadHFiles should put expected data in table",
           expectedRows + 1, util.countRows(table));
 
       // should have a second StoreFile now
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index 0ad77ba..c3f8f8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -23,7 +23,9 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.security.token.Token;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
@@ -66,4 +68,21 @@ public interface AsyncClusterConnection extends AsyncConnection {
    */
   CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
       boolean reload);
+
+  /**
+   * Return the token for this bulk load.
+   */
+  CompletableFuture<String> prepareBulkLoad(TableName tableName);
+
+  /**
+   * Securely bulk load a list of HFiles.
+   * @param row used to locate the region
+   */
+  CompletableFuture<Boolean> bulkLoad(TableName tableName, List<Pair<byte[], String>> familyPaths,
+      byte[] row, boolean assignSeqNum, Token<?> userToken, String bulkToken, boolean copyFiles);
+
+  /**
+   * Clean up after finishing bulk load, no matter whether it succeeded or not.
+   */
+  CompletableFuture<Void> cleanupBulkLoad(TableName tableName, String bulkToken);
 }
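
For context, a minimal sketch of how a server-side caller might drive the three new methods above, mirroring what the removed SecureBulkLoadClient did synchronously; conn, tableName and familyPaths are assumed to be supplied by the caller, and FutureUtils.get is used only to keep the sketch blocking:

    String bulkToken = FutureUtils.get(conn.prepareBulkLoad(tableName));
    try {
      // The row is only used to locate a region; the empty start row targets the first region.
      boolean loaded = FutureUtils.get(conn.bulkLoad(tableName, familyPaths,
          HConstants.EMPTY_START_ROW, true /* assignSeqNum */, null /* userToken */,
          bulkToken, false /* copyFiles */));
      if (!loaded) {
        LOG.warn("Bulk load was not fully applied for " + tableName);
      }
    } finally {
      FutureUtils.get(conn.cleanupBulkLoad(tableName, bulkToken));
    }
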
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
index d61f01f..328b959 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
@@ -21,15 +21,28 @@ import java.net.SocketAddress;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.security.token.Token;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 
 /**
  * The implementation of AsyncClusterConnection.
@@ -77,4 +90,46 @@ class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClu
       boolean reload) {
     return getLocator().getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L);
   }
+
+  @Override
+  public CompletableFuture<String> prepareBulkLoad(TableName tableName) {
+    return callerFactory.<String> single().table(tableName).row(HConstants.EMPTY_START_ROW)
+      .action((controller, loc, stub) -> ConnectionUtils
+        .<TableName, PrepareBulkLoadRequest, PrepareBulkLoadResponse, String> call(controller, loc,
+          stub, tableName, (rn, tn) -> {
+            RegionSpecifier region =
+              RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn);
+            return PrepareBulkLoadRequest.newBuilder()
+              .setTableName(ProtobufUtil.toProtoTableName(tn)).setRegion(region).build();
+          }, (s, c, req, done) -> s.prepareBulkLoad(c, req, done),
+          (c, resp) -> resp.getBulkToken()))
+      .call();
+  }
+
+  @Override
+  public CompletableFuture<Boolean> bulkLoad(TableName tableName,
+      List<Pair<byte[], String>> familyPaths, byte[] row, boolean assignSeqNum, Token<?> userToken,
+      String bulkToken, boolean copyFiles) {
+    return callerFactory.<Boolean> single().table(tableName).row(row)
+      .action((controller, loc, stub) -> ConnectionUtils
+        .<Void, BulkLoadHFileRequest, BulkLoadHFileResponse, Boolean> call(controller, loc, stub,
+          null,
+          (rn, nil) -> RequestConverter.buildBulkLoadHFileRequest(familyPaths, rn, assignSeqNum,
+            userToken, bulkToken, copyFiles),
+          (s, c, req, done) -> s.bulkLoadHFile(c, req, done), (c, resp) -> resp.getLoaded()))
+      .call();
+  }
+
+  @Override
+  public CompletableFuture<Void> cleanupBulkLoad(TableName tableName, String bulkToken) {
+    return callerFactory.<Void> single().table(tableName).row(HConstants.EMPTY_START_ROW)
+      .action((controller, loc, stub) -> ConnectionUtils
+        .<String, CleanupBulkLoadRequest, CleanupBulkLoadResponse, Void> call(controller, loc, stub,
+          bulkToken, (rn, bt) -> {
+            RegionSpecifier region =
+              RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn);
+            return CleanupBulkLoadRequest.newBuilder().setRegion(region).setBulkToken(bt).build();
+          }, (s, c, req, done) -> s.cleanupBulkLoad(c, req, done), (c, resp) -> null))
+      .call();
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
index 2670420..46c0f5a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -19,9 +19,11 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.net.SocketAddress;
+import java.security.PrivilegedExceptionAction;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.FutureUtils;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -30,6 +32,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ClusterConnectionFactory {
 
+  public static final String HBASE_SERVER_CLUSTER_CONNECTION_IMPL =
+    "hbase.server.cluster.connection.impl";
+
   private ClusterConnectionFactory() {
   }
 
@@ -46,6 +51,15 @@ public final class ClusterConnectionFactory {
       SocketAddress localAddress, User user) throws IOException {
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(conf);
     String clusterId = FutureUtils.get(registry.getClusterId());
-    return new AsyncClusterConnectionImpl(conf, registry, clusterId, localAddress, user);
+    Class<? extends AsyncClusterConnection> clazz =
+      conf.getClass(HBASE_SERVER_CLUSTER_CONNECTION_IMPL, AsyncClusterConnectionImpl.class,
+        AsyncClusterConnection.class);
+    try {
+      return user
+        .runAs((PrivilegedExceptionAction<? extends AsyncClusterConnection>) () -> ReflectionUtils
+          .newInstance(clazz, conf, registry, clusterId, localAddress, user));
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
   }
 }
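
The new hbase.server.cluster.connection.impl key makes the connection implementation pluggable, mainly for tests. A hedged sketch of how a test might install its own implementation; DummyAsyncClusterConnection is hypothetical and must expose the (Configuration, AsyncRegistry, String, SocketAddress, User) constructor that ReflectionUtils.newInstance invokes above:

    Configuration conf = HBaseConfiguration.create();
    // Point the factory at the test double instead of AsyncClusterConnectionImpl.
    conf.setClass(ClusterConnectionFactory.HBASE_SERVER_CLUSTER_CONNECTION_IMPL,
        DummyAsyncClusterConnection.class, AsyncClusterConnection.class);
    AsyncClusterConnection conn = ClusterConnectionFactory.createAsyncClusterConnection(
        conf, null /* localAddress */, UserProvider.instantiate(conf).getCurrent());
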
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
deleted file mode 100644
index 6f5412f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mapreduce;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Tool to load the output of HFileOutputFormat into an existing table.
- * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
- *             {@link org.apache.hadoop.hbase.tool.LoadIncrementalHFiles} instead.
- */
-@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NM_SAME_SIMPLE_NAME_AS_SUPERCLASS",
-    justification = "Temporary glue. To be removed")
-@Deprecated
-@InterfaceAudience.Public
-public class LoadIncrementalHFiles extends org.apache.hadoop.hbase.tool.LoadIncrementalHFiles {
-
-  /**
-   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
-   *             {@link org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.LoadQueueItem} instead.
-   */
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NM_SAME_SIMPLE_NAME_AS_SUPERCLASS",
-      justification = "Temporary glue. To be removed")
-  @Deprecated
-  @InterfaceAudience.Public
-  public static class LoadQueueItem
-      extends org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.LoadQueueItem {
-
-    public LoadQueueItem(byte[] family, Path hfilePath) {
-      super(family, hfilePath);
-    }
-  }
-
-  public LoadIncrementalHFiles(Configuration conf) {
-    super(conf);
-  }
-
-  public Map<LoadQueueItem, ByteBuffer> run(String dirPath, Map<byte[], List<Path>> map,
-      TableName tableName) throws IOException {
-    Map<org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.LoadQueueItem, ByteBuffer> originRet;
-    if (dirPath != null) {
-      originRet = run(dirPath, tableName);
-    } else {
-      originRet = run(map, tableName);
-    }
-    Map<LoadQueueItem, ByteBuffer> ret = new HashMap<>();
-    originRet.forEach((k, v) -> {
-      ret.put(new LoadQueueItem(k.getFamily(), k.getFilePath()), v);
-    });
-    return ret;
-  }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index 9f1ab96..a5823ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -40,7 +40,6 @@ import java.util.TreeMap;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -82,8 +81,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
@@ -91,6 +89,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
 /**
  * An implementation of {@link MobCompactor} that compacts the mob files in partitions.
  */
@@ -675,7 +675,7 @@ public class PartitionedMobCompactor extends MobCompactor {
         cleanupTmpMobFile = false;
         cleanupCommittedMobFile = true;
         // bulkload the ref file
-        bulkloadRefFile(connection, table, bulkloadPathOfPartition, filePath.getName());
+        bulkloadRefFile(table.getName(), bulkloadPathOfPartition, filePath.getName());
         cleanupCommittedMobFile = false;
         newFiles.add(new Path(mobFamilyDir, filePath.getName()));
       }
@@ -818,21 +818,16 @@ public class PartitionedMobCompactor extends MobCompactor {
 
   /**
    * Bulkloads the current file.
-   *
-   * @param connection to use to get admin/RegionLocator
-   * @param table The current table.
+   * @param tableName The table to load into.
    * @param bulkloadDirectory The path of bulkload directory.
    * @param fileName The current file name.
    * @throws IOException if IO failure is encountered
    */
-  private void bulkloadRefFile(Connection connection, Table table, Path bulkloadDirectory,
-      String fileName)
+  private void bulkloadRefFile(TableName tableName, Path bulkloadDirectory, String fileName)
       throws IOException {
     // bulkload the ref file
     try {
-      LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
-      bulkload.doBulkLoad(bulkloadDirectory, connection.getAdmin(), table,
-          connection.getRegionLocator(table.getName()));
+      BulkLoadHFiles.create(conf).bulkLoad(tableName, bulkloadDirectory);
     } catch (Exception e) {
       throw new IOException(e);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
index 1f44817..6204ea5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
@@ -1,17 +1,22 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package org.apache.hadoop.hbase.replication.regionserver;
 
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -30,33 +35,32 @@ import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.LoadQueueItem;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles.LoadQueueItem;
+import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * It is used for replicating HFile entries. It will first copy parallely all the hfiles to a local
- * staging directory and then it will use ({@link LoadIncrementalHFiles} to prepare a collection of
+ * staging directory and then it will use {@link BulkLoadHFiles} to prepare a collection of
  * {@link LoadQueueItem} which will finally be loaded(replicated) into the table of this cluster.
  */
 @InterfaceAudience.Private
@@ -82,7 +86,7 @@ public class HFileReplicator {
   private FsDelegationToken fsDelegationToken;
   private UserProvider userProvider;
   private Configuration conf;
-  private Connection connection;
+  private AsyncClusterConnection connection;
   private Path hbaseStagingDir;
   private ThreadPoolExecutor exec;
   private int maxCopyThreads;
@@ -91,7 +95,7 @@ public class HFileReplicator {
   public HFileReplicator(Configuration sourceClusterConf,
       String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath,
       Map<String, List<Pair<byte[], List<String>>>> tableQueueMap, Configuration conf,
-      Connection connection) throws IOException {
+      AsyncClusterConnection connection) throws IOException {
     this.sourceClusterConf = sourceClusterConf;
     this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath;
     this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath;
@@ -128,96 +132,61 @@ public class HFileReplicator {
       String tableNameString = tableStagingDir.getKey();
       Path stagingDir = tableStagingDir.getValue();
 
-      LoadIncrementalHFiles loadHFiles = null;
-      try {
-        loadHFiles = new LoadIncrementalHFiles(conf);
-      } catch (Exception e) {
-        LOG.error("Failed to initialize LoadIncrementalHFiles for replicating bulk loaded"
-            + " data.", e);
-        throw new IOException(e);
-      }
-      Configuration newConf = HBaseConfiguration.create(conf);
-      newConf.set(LoadIncrementalHFiles.CREATE_TABLE_CONF_KEY, "no");
-      loadHFiles.setConf(newConf);
-
       TableName tableName = TableName.valueOf(tableNameString);
-      Table table = this.connection.getTable(tableName);
 
       // Prepare collection of queue of hfiles to be loaded(replicated)
       Deque<LoadQueueItem> queue = new LinkedList<>();
-      loadHFiles.prepareHFileQueue(stagingDir, table, queue, false);
+      BulkLoadHFilesTool.prepareHFileQueue(conf, connection, tableName, stagingDir, queue, false,
+        false);
 
       if (queue.isEmpty()) {
         LOG.warn("Replication process did not find any files to replicate in directory "
             + stagingDir.toUri());
         return null;
       }
-
-      try (RegionLocator locator = connection.getRegionLocator(tableName)) {
-
-        fsDelegationToken.acquireDelegationToken(sinkFs);
-
-        // Set the staging directory which will be used by LoadIncrementalHFiles for loading the
-        // data
-        loadHFiles.setBulkToken(stagingDir.toString());
-
-        doBulkLoad(loadHFiles, table, queue, locator, maxRetries);
+      fsDelegationToken.acquireDelegationToken(sinkFs);
+      try {
+        doBulkLoad(conf, tableName, stagingDir, queue, maxRetries);
       } finally {
-        cleanup(stagingDir.toString(), table);
+        cleanup(stagingDir);
       }
     }
     return null;
   }
 
-  private void doBulkLoad(LoadIncrementalHFiles loadHFiles, Table table,
-      Deque<LoadQueueItem> queue, RegionLocator locator, int maxRetries) throws IOException {
-    int count = 0;
-    Pair<byte[][], byte[][]> startEndKeys;
-    while (!queue.isEmpty()) {
-      // need to reload split keys each iteration.
-      startEndKeys = locator.getStartEndKeys();
+  private void doBulkLoad(Configuration conf, TableName tableName, Path stagingDir,
+      Deque<LoadQueueItem> queue, int maxRetries) throws IOException {
+    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf);
+    // Set the staging directory which will be used by BulkLoadHFilesTool for loading the data
+    loader.setBulkToken(stagingDir.toString());
+    for (int count = 0; !queue.isEmpty(); count++) {
       if (count != 0) {
-        LOG.warn("Error occurred while replicating HFiles, retry attempt " + count + " with "
-            + queue.size() + " files still remaining to replicate.");
+        LOG.warn("Error occurred while replicating HFiles, retry attempt " + count + " with " +
+          queue.size() + " files still remaining to replicate.");
       }
 
       if (maxRetries != 0 && count >= maxRetries) {
-        throw new IOException("Retry attempted " + count
-            + " times without completing, bailing out.");
+        throw new IOException(
+          "Retry attempted " + count + " times without completing, bailing out.");
       }
-      count++;
 
       // Try bulk load
-      loadHFiles.loadHFileQueue(table, connection, queue, startEndKeys);
+      loader.loadHFileQueue(connection, tableName, queue, false);
     }
   }
 
-  private void cleanup(String stagingDir, Table table) {
+  private void cleanup(Path stagingDir) {
     // Release the file system delegation token
     fsDelegationToken.releaseDelegationToken();
     // Delete the staging directory
     if (stagingDir != null) {
       try {
-        sinkFs.delete(new Path(stagingDir), true);
+        sinkFs.delete(stagingDir, true);
       } catch (IOException e) {
         LOG.warn("Failed to delete the staging directory " + stagingDir, e);
       }
     }
     // Do not close the file system
-
-    /*
-     * if (sinkFs != null) { try { sinkFs.close(); } catch (IOException e) { LOG.warn(
-     * "Failed to close the file system"); } }
-     */
-
-    // Close the table
-    if (table != null) {
-      try {
-        table.close();
-      } catch (IOException e) {
-        LOG.warn("Failed to close the table.", e);
-      }
-    }
   }
 
   private Map<String, Path> copyHFilesToStagingDir() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 3cd928a..e30e637 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -27,8 +26,9 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.TreeMap;
 import java.util.UUID;
+import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
-
+import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -40,16 +40,18 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncTable;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.client.Row;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
+import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -83,7 +85,7 @@ public class ReplicationSink {
   private final Configuration conf;
   // Volatile because of note in here -- look for double-checked locking:
   // http://www.oracle.com/technetwork/articles/javase/bloch-effective-08-qa-140880.html
-  private volatile Connection sharedConn;
+  private volatile AsyncClusterConnection sharedConn;
   private final MetricsSink metrics;
   private final AtomicLong totalReplicatedEdits = new AtomicLong();
   private final Object sharedConnLock = new Object();
@@ -390,37 +392,34 @@ public class ReplicationSink {
    * Do the changes and handle the pool
    * @param tableName table to insert into
    * @param allRows list of actions
-   * @throws IOException
    */
   private void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
     if (allRows.isEmpty()) {
       return;
     }
-    Connection connection = getConnection();
-    try (Table table = connection.getTable(tableName)) {
-      for (List<Row> rows : allRows) {
-        table.batch(rows, null);
-      }
-    } catch (RetriesExhaustedWithDetailsException rewde) {
-      for (Throwable ex : rewde.getCauses()) {
-        if (ex instanceof TableNotFoundException) {
+    AsyncTable<?> table = getConnection().getTable(tableName);
+    List<Future<?>> futures = allRows.stream().map(table::batchAll).collect(Collectors.toList());
+    for (Future<?> future : futures) {
+      try {
+        FutureUtils.get(future);
+      } catch (RetriesExhaustedException e) {
+        if (e.getCause() instanceof TableNotFoundException) {
           throw new TableNotFoundException("'" + tableName + "'");
         }
+        throw e;
       }
-      throw rewde;
-    } catch (InterruptedException ix) {
-      throw (InterruptedIOException) new InterruptedIOException().initCause(ix);
     }
   }
 
-  private Connection getConnection() throws IOException {
+  private AsyncClusterConnection getConnection() throws IOException {
     // See https://en.wikipedia.org/wiki/Double-checked_locking
-    Connection connection = sharedConn;
+    AsyncClusterConnection connection = sharedConn;
     if (connection == null) {
       synchronized (sharedConnLock) {
         connection = sharedConn;
         if (connection == null) {
-          connection = ConnectionFactory.createConnection(conf);
+          connection = ClusterConnectionFactory.createAsyncClusterConnection(conf, null,
+            UserProvider.instantiate(conf).getCurrent());
           sharedConn = connection;
         }
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java
index f0b13e1..6f6ae1f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,9 +18,10 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
 
 /**
  * Implementations are installed on a Replication Sink called from inside
@@ -36,6 +37,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * source-side.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
+@InterfaceStability.Evolving
 public interface WALEntrySinkFilter {
   /**
    * Name of configuration to set with name of implementing WALEntrySinkFilter class.
@@ -46,7 +48,7 @@ public interface WALEntrySinkFilter {
    * Called after Construction.
    * Use passed Connection to keep any context the filter might need.
    */
-  void init(Connection connection);
+  void init(AsyncConnection conn);
 
   /**
    * @param table Table edit is destined for.
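
Since init() now receives an AsyncConnection, existing WALEntrySinkFilter implementations have to be adjusted to the new signature. Below is a minimal hypothetical implementation, assuming the interface's existing boolean filter(TableName, long writeTime) method; the class name and the one-day age cutoff are illustrative only, not part of this change.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter;

public class ExampleAgeBasedSinkFilter implements WALEntrySinkFilter {
  private static final long ONE_DAY_MS = 24L * 60 * 60 * 1000;

  @Override
  public void init(AsyncConnection conn) {
    // This example keeps no state; a real filter could cache table metadata
    // fetched through the AsyncConnection here.
  }

  @Override
  public boolean filter(TableName table, long writeTime) {
    // Returning true filters out (skips) the edit at the replication sink.
    return System.currentTimeMillis() - writeTime > ONE_DAY_MS;
  }
}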
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
index 795bd66..abf1fd8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
@@ -17,48 +17,1028 @@
  */
 package org.apache.hadoop.hbase.tool;
 
+import static java.lang.String.format;
+
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
+import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.AsyncAdmin;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.HalfStoreFileReader;
+import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.token.FsDelegationToken;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSVisitor;
+import org.apache.hadoop.hbase.util.FutureUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
+import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
+import org.apache.hbase.thirdparty.com.google.common.collect.Multimaps;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * The implementation for {@link BulkLoadHFiles}, and also can be executed from command line as a
  * tool.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-public class BulkLoadHFilesTool extends LoadIncrementalHFiles implements BulkLoadHFiles {
+public class BulkLoadHFilesTool extends Configured implements BulkLoadHFiles, Tool {
+
+  private static final Logger LOG = LoggerFactory.getLogger(BulkLoadHFilesTool.class);
 
   public static final String NAME = "completebulkload";
 
+  // We use a '.' prefix which is ignored when walking directory trees
+  // above. It is invalid family name.
+  static final String TMP_DIR = ".tmp";
+
+  private final int maxFilesPerRegionPerFamily;
+  private final boolean assignSeqIds;
+
+  // Source delegation token
+  private final FsDelegationToken fsDelegationToken;
+  private final UserProvider userProvider;
+  private final int nrThreads;
+  private final AtomicInteger numRetries = new AtomicInteger(0);
+  private String bulkToken;
+
   public BulkLoadHFilesTool(Configuration conf) {
-    super(conf);
+    // make a copy, just to be sure we're not overriding someone else's config
+    super(new Configuration(conf));
+    // disable blockcache for tool invocation, see HBASE-10500
+    conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
+    userProvider = UserProvider.instantiate(conf);
+    fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
+    assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
+    maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
+    nrThreads =
+      conf.getInt("hbase.loadincremental.threads.max", Runtime.getRuntime().availableProcessors());
+  }
+
+  // Initialize a thread pool
+  private ExecutorService createExecutorService() {
+    ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
+      new LinkedBlockingQueue<>(),
+      new ThreadFactoryBuilder().setNameFormat("BulkLoadHFilesTool-%1$d").build());
+    pool.allowCoreThreadTimeOut(true);
+    return pool;
+  }
+
+  private boolean isCreateTable() {
+    return "yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"));
+  }
+
+  private boolean isSilence() {
+    return "yes".equalsIgnoreCase(getConf().get(IGNORE_UNMATCHED_CF_CONF_KEY, ""));
+  }
+
+  private boolean isAlwaysCopyFiles() {
+    return getConf().getBoolean(ALWAYS_COPY_FILES, false);
+  }
+
+  private static boolean shouldCopyHFileMetaKey(byte[] key) {
+    // skip encoding to keep hfile meta consistent with data block info, see HBASE-15085
+    if (Bytes.equals(key, HFileDataBlockEncoder.DATA_BLOCK_ENCODING)) {
+      return false;
+    }
+
+    return !HFile.isReservedFileInfoKey(key);
+  }
+
+  /**
+   * Checks whether there is any invalid family name in HFiles to be bulk loaded.
+   */
+  private static void validateFamiliesInHFiles(TableDescriptor tableDesc,
+      Deque<LoadQueueItem> queue, boolean silence) throws IOException {
+    Set<String> familyNames = Arrays.asList(tableDesc.getColumnFamilies()).stream()
+      .map(f -> f.getNameAsString()).collect(Collectors.toSet());
+    List<String> unmatchedFamilies = queue.stream().map(item -> Bytes.toString(item.getFamily()))
+      .filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList());
+    if (unmatchedFamilies.size() > 0) {
+      String msg =
+        "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " +
+          unmatchedFamilies + "; valid family names of table " + tableDesc.getTableName() +
+          " are: " + familyNames;
+      LOG.error(msg);
+      if (!silence) {
+        throw new IOException(msg);
+      }
+    }
+  }
+
+  /**
+   * Populate the Queue with given HFiles
+   */
+  private static void populateLoadQueue(Deque<LoadQueueItem> ret, Map<byte[], List<Path>> map) {
+    map.forEach((k, v) -> v.stream().map(p -> new LoadQueueItem(k, p)).forEachOrdered(ret::add));
+  }
+
+  private interface BulkHFileVisitor<TFamily> {
+
+    TFamily bulkFamily(byte[] familyName) throws IOException;
+
+    void bulkHFile(TFamily family, FileStatus hfileStatus) throws IOException;
+  }
+
+  /**
+   * Iterate over the bulkDir hfiles. Skip reference, HFileLink, files starting with "_". Check and
+   * skip non-valid hfiles by default, or skip this validation by setting
+   * 'hbase.loadincremental.validate.hfile' to false.
+   */
+  private static <TFamily> void visitBulkHFiles(FileSystem fs, Path bulkDir,
+      BulkHFileVisitor<TFamily> visitor, boolean validateHFile) throws IOException {
+    FileStatus[] familyDirStatuses = fs.listStatus(bulkDir);
+    for (FileStatus familyStat : familyDirStatuses) {
+      if (!familyStat.isDirectory()) {
+        LOG.warn("Skipping non-directory " + familyStat.getPath());
+        continue;
+      }
+      Path familyDir = familyStat.getPath();
+      byte[] familyName = Bytes.toBytes(familyDir.getName());
+      // Skip invalid family
+      try {
+        ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(familyName);
+      } catch (IllegalArgumentException e) {
+        LOG.warn("Skipping invalid " + familyStat.getPath());
+        continue;
+      }
+      TFamily family = visitor.bulkFamily(familyName);
+
+      FileStatus[] hfileStatuses = fs.listStatus(familyDir);
+      for (FileStatus hfileStatus : hfileStatuses) {
+        if (!fs.isFile(hfileStatus.getPath())) {
+          LOG.warn("Skipping non-file " + hfileStatus);
+          continue;
+        }
+
+        Path hfile = hfileStatus.getPath();
+        // Skip "_", reference, HFileLink
+        String fileName = hfile.getName();
+        if (fileName.startsWith("_")) {
+          continue;
+        }
+        if (StoreFileInfo.isReference(fileName)) {
+          LOG.warn("Skipping reference " + fileName);
+          continue;
+        }
+        if (HFileLink.isHFileLink(fileName)) {
+          LOG.warn("Skipping HFileLink " + fileName);
+          continue;
+        }
+
+        // Validate HFile Format if needed
+        if (validateHFile) {
+          try {
+            if (!HFile.isHFileFormat(fs, hfile)) {
+              LOG.warn("the file " + hfile + " doesn't seems to be an hfile. skipping");
+              continue;
+            }
+          } catch (FileNotFoundException e) {
+            LOG.warn("the file " + hfile + " was removed");
+            continue;
+          }
+        }
+
+        visitor.bulkHFile(family, hfileStatus);
+      }
+    }
+  }
+
+  /**
+   * Walk the given directory for all HFiles, and return a Queue containing all such files.
+   */
+  private static void discoverLoadQueue(Configuration conf, Deque<LoadQueueItem> ret, Path hfofDir,
+      boolean validateHFile) throws IOException {
+    visitBulkHFiles(hfofDir.getFileSystem(conf), hfofDir, new BulkHFileVisitor<byte[]>() {
+      @Override
+      public byte[] bulkFamily(final byte[] familyName) {
+        return familyName;
+      }
+
+      @Override
+      public void bulkHFile(final byte[] family, final FileStatus hfile) throws IOException {
+        long length = hfile.getLen();
+        if (length > conf.getLong(HConstants.HREGION_MAX_FILESIZE,
+          HConstants.DEFAULT_MAX_FILE_SIZE)) {
+          LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length +
+            " bytes can be problematic as it may lead to oversplitting.");
+        }
+        ret.add(new LoadQueueItem(family, hfile.getPath()));
+      }
+    }, validateHFile);
+  }
+
+  /**
+   * Prepare a collection of {@code LoadQueueItem} from list of source hfiles contained in the
+   * passed directory and validates whether the prepared queue has all the valid table column
+   * families in it.
+   * @param map map of family to List of hfiles
+   * @param tableName table to which hfiles should be loaded
+   * @param queue queue which needs to be loaded into the table
+   * @param silence true to ignore unmatched column families
+   * @throws IOException If any I/O or network error occurred
+   */
+  public static void prepareHFileQueue(AsyncClusterConnection conn, TableName tableName,
+      Map<byte[], List<Path>> map, Deque<LoadQueueItem> queue, boolean silence) throws IOException {
+    populateLoadQueue(queue, map);
+    validateFamiliesInHFiles(FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), queue,
+      silence);
+  }
+
+  /**
+   * Prepare a collection of {@code LoadQueueItem} from list of source hfiles contained in the
+   * passed directory and validates whether the prepared queue has all the valid table column
+   * families in it.
+   * @param hfilesDir directory containing list of hfiles to be loaded into the table
+   * @param queue queue which needs to be loaded into the table
+   * @param validateHFile if true hfiles will be validated for its format
+   * @param silence true to ignore unmatched column families
+   * @throws IOException If any I/O or network error occurred
+   */
+  public static void prepareHFileQueue(Configuration conf, AsyncClusterConnection conn,
+      TableName tableName, Path hfilesDir, Deque<LoadQueueItem> queue, boolean validateHFile,
+      boolean silence) throws IOException {
+    discoverLoadQueue(conf, queue, hfilesDir, validateHFile);
+    validateFamiliesInHFiles(FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), queue,
+      silence);
+  }
+
+  /**
+   * Used by the replication sink to load the hfiles from the source cluster. It does the following,
+   * <ol>
+   * <li>{@link #groupOrSplitPhase(AsyncClusterConnection, TableName, ExecutorService, Deque, List)}
+   * </li>
+   * <li>{@link #bulkLoadPhase(AsyncClusterConnection, TableName, Deque, Multimap, boolean, Map)}
+   * </li>
+   * </ol>
+   * @param conn Connection to use
+   * @param tableName Table to which these hfiles should be loaded to
+   * @param queue {@code LoadQueueItem} has hfiles yet to be loaded
+   */
+  public void loadHFileQueue(AsyncClusterConnection conn, TableName tableName,
+      Deque<LoadQueueItem> queue, boolean copyFiles) throws IOException {
+    ExecutorService pool = createExecutorService();
+    try {
+      Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(conn, tableName, pool,
+        queue, FutureUtils.get(conn.getRegionLocator(tableName).getStartEndKeys())).getFirst();
+      bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, null);
+    } finally {
+      pool.shutdown();
+    }
+  }
+
+  /**
+   * This takes the LQI's grouped by likely regions and attempts to bulk load them. Any failures are
+   * re-queued for another pass with the groupOrSplitPhase.
+   * <p/>
+   * protected for testing.
+   */
+  @VisibleForTesting
+  protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
+      Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
+      boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
+    // atomically bulk load the groups.
+    List<Future<Collection<LoadQueueItem>>> loadingFutures = new ArrayList<>();
+    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> entry : regionGroups.asMap()
+      .entrySet()) {
+      byte[] first = entry.getKey().array();
+      final Collection<LoadQueueItem> lqis = entry.getValue();
+      List<Pair<byte[], String>> familyPaths =
+        lqis.stream().map(lqi -> Pair.newPair(lqi.getFamily(), lqi.getFilePath().toString()))
+          .collect(Collectors.toList());
+      CompletableFuture<Collection<LoadQueueItem>> future = new CompletableFuture<>();
+      FutureUtils.addListener(conn.bulkLoad(tableName, familyPaths, first, assignSeqIds,
+        fsDelegationToken.getUserToken(), bulkToken, copyFiles), (loaded, error) -> {
+          if (error != null) {
+            LOG.error("Encountered unrecoverable error from region server", error);
+            if (getConf().getBoolean(RETRY_ON_IO_EXCEPTION, false) &&
+              numRetries.get() < getConf().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+                HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER)) {
+              LOG.warn("Will attempt to retry loading failed HFiles. Retry #" +
+                numRetries.incrementAndGet());
+              // return lqi's to retry
+              future.complete(lqis);
+            } else {
+              LOG.error(RETRY_ON_IO_EXCEPTION +
+                " is disabled or we have reached retry limit. Unable to recover");
+              future.completeExceptionally(error);
+            }
+          } else {
+            if (loaded) {
+              future.complete(Collections.emptyList());
+            } else {
+              LOG.warn("Attempt to bulk load region containing " + Bytes.toStringBinary(first) +
+                " into table " + tableName + " with files " + lqis +
+                " failed.  This is recoverable and they will be retried.");
+              // return lqi's to retry
+              future.complete(lqis);
+            }
+          }
+        });
+      loadingFutures.add(future);
+      if (item2RegionMap != null) {
+        for (LoadQueueItem lqi : lqis) {
+          item2RegionMap.put(lqi, entry.getKey());
+        }
+      }
+    }
+
+    // get all the results.
+    for (Future<Collection<LoadQueueItem>> future : loadingFutures) {
+      try {
+        Collection<LoadQueueItem> toRetry = future.get();
+
+        for (LoadQueueItem lqi : toRetry) {
+          item2RegionMap.remove(lqi);
+        }
+        // LQIs that are requeued to be regrouped.
+        queue.addAll(toRetry);
+      } catch (ExecutionException e1) {
+        Throwable t = e1.getCause();
+        if (t instanceof IOException) {
+          // At this point something unrecoverable has happened.
+          // TODO Implement bulk load recovery
+          throw new IOException("BulkLoad encountered an unrecoverable problem", t);
+        }
+        LOG.error("Unexpected execution exception during bulk load", e1);
+        throw new IllegalStateException(t);
+      } catch (InterruptedException e1) {
+        LOG.error("Unexpected interrupted exception during bulk load", e1);
+        throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
+      }
+    }
+  }
+
+  private boolean checkHFilesCountPerRegionPerFamily(
+      final Multimap<ByteBuffer, LoadQueueItem> regionGroups) {
+    for (Map.Entry<ByteBuffer, Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
+      Map<byte[], MutableInt> filesMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+      for (LoadQueueItem lqi : e.getValue()) {
+        MutableInt count = filesMap.computeIfAbsent(lqi.getFamily(), k -> new MutableInt());
+        count.increment();
+        if (count.intValue() > maxFilesPerRegionPerFamily) {
+          LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily +
+            " hfiles to family " + Bytes.toStringBinary(lqi.getFamily()) +
+            " of region with start key " + Bytes.toStringBinary(e.getKey()));
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  /**
+   * @param conn the async cluster connection to use
+   * @param tableName the table to load into
+   * @param pool the ExecutorService
+   * @param queue the queue for LoadQueueItem
+   * @param startEndKeys start and end keys
+   * @return A map that groups LQI by likely bulk load region targets and Set of missing hfiles.
+   */
+  private Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> groupOrSplitPhase(
+      AsyncClusterConnection conn, TableName tableName, ExecutorService pool,
+      Deque<LoadQueueItem> queue, List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
+    // <region start key, LQI> need synchronized only within this scope of this
+    // phase because of the puts that happen in futures.
+    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
+    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
+    Set<String> missingHFiles = new HashSet<>();
+    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair =
+      new Pair<>(regionGroups, missingHFiles);
+
+    // drain LQIs and figure out bulk load groups
+    Set<Future<Pair<List<LoadQueueItem>, String>>> splittingFutures = new HashSet<>();
+    while (!queue.isEmpty()) {
+      final LoadQueueItem item = queue.remove();
+
+      final Callable<Pair<List<LoadQueueItem>, String>> call =
+        new Callable<Pair<List<LoadQueueItem>, String>>() {
+          @Override
+          public Pair<List<LoadQueueItem>, String> call() throws Exception {
+            Pair<List<LoadQueueItem>, String> splits =
+              groupOrSplit(conn, tableName, regionGroups, item, startEndKeys);
+            return splits;
+          }
+        };
+      splittingFutures.add(pool.submit(call));
+    }
+    // get all the results. All grouping and splitting must finish before
+    // we can attempt the atomic loads.
+    for (Future<Pair<List<LoadQueueItem>, String>> lqis : splittingFutures) {
+      try {
+        Pair<List<LoadQueueItem>, String> splits = lqis.get();
+        if (splits != null) {
+          if (splits.getFirst() != null) {
+            queue.addAll(splits.getFirst());
+          } else {
+            missingHFiles.add(splits.getSecond());
+          }
+        }
+      } catch (ExecutionException e1) {
+        Throwable t = e1.getCause();
+        if (t instanceof IOException) {
+          LOG.error("IOException during splitting", e1);
+          throw (IOException) t; // would have been thrown if not parallelized,
+        }
+        LOG.error("Unexpected execution exception during splitting", e1);
+        throw new IllegalStateException(t);
+      } catch (InterruptedException e1) {
+        LOG.error("Unexpected interrupted exception during splitting", e1);
+        throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
+      }
+    }
+    return pair;
+  }
+
+  // unique file name for the table
+  private String getUniqueName() {
+    return UUID.randomUUID().toString().replaceAll("-", "");
+  }
+
+  private List<LoadQueueItem> splitStoreFile(LoadQueueItem item, TableDescriptor tableDesc,
+      byte[] startKey, byte[] splitKey) throws IOException {
+    Path hfilePath = item.getFilePath();
+    byte[] family = item.getFamily();
+    Path tmpDir = hfilePath.getParent();
+    if (!tmpDir.getName().equals(TMP_DIR)) {
+      tmpDir = new Path(tmpDir, TMP_DIR);
+    }
+
+    LOG.info("HFile at " + hfilePath + " no longer fits inside a single " + "region. Splitting...");
+
+    String uniqueName = getUniqueName();
+    ColumnFamilyDescriptor familyDesc = tableDesc.getColumnFamily(family);
+
+    Path botOut = new Path(tmpDir, uniqueName + ".bottom");
+    Path topOut = new Path(tmpDir, uniqueName + ".top");
+    splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut);
+
+    FileSystem fs = tmpDir.getFileSystem(getConf());
+    fs.setPermission(tmpDir, FsPermission.valueOf("-rwxrwxrwx"));
+    fs.setPermission(botOut, FsPermission.valueOf("-rwxrwxrwx"));
+    fs.setPermission(topOut, FsPermission.valueOf("-rwxrwxrwx"));
+
+    // Add these back at the *front* of the queue, so there's a lower
+    // chance that the region will just split again before we get there.
+    List<LoadQueueItem> lqis = new ArrayList<>(2);
+    lqis.add(new LoadQueueItem(family, botOut));
+    lqis.add(new LoadQueueItem(family, topOut));
+
+    // If the current item is already the result of previous splits,
+    // we don't need it anymore. Clean up to save space.
+    // It is not part of the original input files.
+    try {
+      if (tmpDir.getName().equals(TMP_DIR)) {
+        fs.delete(hfilePath, false);
+      }
+    } catch (IOException e) {
+      LOG.warn("Unable to delete temporary split file " + hfilePath);
+    }
+    LOG.info("Successfully split into new HFiles " + botOut + " and " + topOut);
+    return lqis;
+  }
+
+  /**
+   * Attempt to assign the given load queue item into its target region group. If the hfile boundary
+   * no longer fits into a region, physically splits the hfile such that the new bottom half will
+   * fit and returns the list of LQI's corresponding to the resultant hfiles.
+   * <p/>
+   * protected for testing
+   * @throws IOException if an IO failure is encountered
+   */
+  @VisibleForTesting
+  protected Pair<List<LoadQueueItem>, String> groupOrSplit(AsyncClusterConnection conn,
+      TableName tableName, Multimap<ByteBuffer, LoadQueueItem> regionGroups, LoadQueueItem item,
+      List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
+    Path hfilePath = item.getFilePath();
+    Optional<byte[]> first, last;
+    try (HFile.Reader hfr = HFile.createReader(hfilePath.getFileSystem(getConf()), hfilePath,
+      CacheConfig.DISABLED, true, getConf())) {
+      hfr.loadFileInfo();
+      first = hfr.getFirstRowKey();
+      last = hfr.getLastRowKey();
+    } catch (FileNotFoundException fnfe) {
+      LOG.debug("encountered", fnfe);
+      return new Pair<>(null, hfilePath.getName());
+    }
+
+    LOG.info("Trying to load hfile=" + hfilePath + " first=" + first.map(Bytes::toStringBinary) +
+      " last=" + last.map(Bytes::toStringBinary));
+    if (!first.isPresent() || !last.isPresent()) {
+      assert !first.isPresent() && !last.isPresent();
+      // TODO what if this is due to a bad HFile?
+      LOG.info("hfile " + hfilePath + " has no entries, skipping");
+      return null;
+    }
+    if (Bytes.compareTo(first.get(), last.get()) > 0) {
+      throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(first.get()) +
+        " > " + Bytes.toStringBinary(last.get()));
+    }
+    int idx =
+      Collections.binarySearch(startEndKeys, Pair.newPair(first.get(), HConstants.EMPTY_END_ROW),
+        (p1, p2) -> Bytes.compareTo(p1.getFirst(), p2.getFirst()));
+    if (idx < 0) {
+      // not on boundary, returns -(insertion index). Calculate region it
+      // would be in.
+      idx = -(idx + 1) - 1;
+    }
+    int indexForCallable = idx;
+
+    /**
+     * We can consider there is a region hole in the following conditions: 1) if idx < 0, then the
+     * first region info is lost. 2) if the endkey of a region is not equal to the startkey of the
+     * next region. 3) if the endkey of the last region is not empty.
+     */
+    if (indexForCallable < 0) {
+      throw new IOException("The first region info for table " + tableName +
+        " can't be found in hbase:meta.Please use hbck tool to fix it first.");
+    } else if ((indexForCallable == startEndKeys.size() - 1) &&
+      !Bytes.equals(startEndKeys.get(indexForCallable).getSecond(), HConstants.EMPTY_BYTE_ARRAY)) {
+      throw new IOException("The last region info for table " + tableName +
+        " can't be found in hbase:meta.Please use hbck tool to fix it first.");
+    } else if (indexForCallable + 1 < startEndKeys.size() &&
+      !(Bytes.compareTo(startEndKeys.get(indexForCallable).getSecond(),
+        startEndKeys.get(indexForCallable + 1).getFirst()) == 0)) {
+      throw new IOException("The endkey of one region for table " + tableName +
+        " is not equal to the startkey of the next region in hbase:meta." +
+        "Please use hbck tool to fix it first.");
+    }
+
+    boolean lastKeyInRange = Bytes.compareTo(last.get(), startEndKeys.get(idx).getSecond()) < 0 ||
+      Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY);
+    if (!lastKeyInRange) {
+      Pair<byte[], byte[]> startEndKey = startEndKeys.get(indexForCallable);
+      List<LoadQueueItem> lqis =
+        splitStoreFile(item, FutureUtils.get(conn.getAdmin().getDescriptor(tableName)),
+          startEndKey.getFirst(), startEndKey.getSecond());
+      return new Pair<>(lqis, null);
+    }
+
+    // group regions.
+    regionGroups.put(ByteBuffer.wrap(startEndKeys.get(idx).getFirst()), item);
+    return null;
+  }
+
+  /**
+   * Split a storefile into a top and bottom half, maintaining the metadata, recreating bloom
+   * filters, etc.
+   */
+  @VisibleForTesting
+  static void splitStoreFile(Configuration conf, Path inFile, ColumnFamilyDescriptor familyDesc,
+      byte[] splitKey, Path bottomOut, Path topOut) throws IOException {
+    // Open reader with no block cache, and not in-memory
+    Reference topReference = Reference.createTopReference(splitKey);
+    Reference bottomReference = Reference.createBottomReference(splitKey);
+
+    copyHFileHalf(conf, inFile, topOut, topReference, familyDesc);
+    copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc);
+  }
+
+  /**
+   * Copy half of an HFile into a new HFile.
+   */
+  private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile,
+      Reference reference, ColumnFamilyDescriptor familyDescriptor) throws IOException {
+    FileSystem fs = inFile.getFileSystem(conf);
+    CacheConfig cacheConf = CacheConfig.DISABLED;
+    HalfStoreFileReader halfReader = null;
+    StoreFileWriter halfWriter = null;
+    try {
+      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, true,
+        new AtomicInteger(0), true, conf);
+      Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
+
+      int blocksize = familyDescriptor.getBlocksize();
+      Algorithm compression = familyDescriptor.getCompressionType();
+      BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
+      HFileContext hFileContext = new HFileContextBuilder().withCompression(compression)
+        .withChecksumType(HStore.getChecksumType(conf))
+        .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blocksize)
+        .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true)
+        .build();
+      halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
+        .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
+      HFileScanner scanner = halfReader.getScanner(false, false, false);
+      scanner.seekTo();
+      do {
+        halfWriter.append(scanner.getCell());
+      } while (scanner.next());
+
+      for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
+        if (shouldCopyHFileMetaKey(entry.getKey())) {
+          halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
+        }
+      }
+    } finally {
+      if (halfReader != null) {
+        try {
+          halfReader.close(cacheConf.shouldEvictOnClose());
+        } catch (IOException e) {
+          LOG.warn("failed to close hfile reader for " + inFile, e);
+        }
+      }
+      if (halfWriter != null) {
+        halfWriter.close();
+      }
+    }
+  }
+
+  /**
+   * Infers region boundaries for a new table.
+   * <p/>
+   * Parameter: <br/>
+   * bdryMap is a map between keys to an integer belonging to {+1, -1}
+   * <ul>
+   * <li>If a key is a start key of a file, then it maps to +1</li>
+   * <li>If a key is an end key of a file, then it maps to -1</li>
+   * </ul>
+   * <p>
+   * Algo:<br/>
+   * <ol>
+   * <li>Poll on the keys in order:
+   * <ol type="a">
+   * <li>Keep adding the mapped values to these keys (runningSum)</li>
+   * <li>Each time runningSum reaches 0, add the start Key from when the runningSum had started to a
+   * boundary list.</li>
+   * </ol>
+   * </li>
+   * <li>Return the boundary list.</li>
+   * </ol>
+   */
+  public static byte[][] inferBoundaries(SortedMap<byte[], Integer> bdryMap) {
+    List<byte[]> keysArray = new ArrayList<>();
+    int runningValue = 0;
+    byte[] currStartKey = null;
+    boolean firstBoundary = true;
+
+    for (Map.Entry<byte[], Integer> item : bdryMap.entrySet()) {
+      if (runningValue == 0) {
+        currStartKey = item.getKey();
+      }
+      runningValue += item.getValue();
+      if (runningValue == 0) {
+        if (!firstBoundary) {
+          keysArray.add(currStartKey);
+        }
+        firstBoundary = false;
+      }
+    }
+
+    return keysArray.toArray(new byte[0][]);
+  }
+
+  /**
+   * If the table is created for the first time, then "completebulkload" reads the files twice.
+   * More modifications are necessary if we want to avoid doing it.
+   */
+  private void createTable(TableName tableName, Path hfofDir, AsyncAdmin admin) throws IOException {
+    final FileSystem fs = hfofDir.getFileSystem(getConf());
+
+    // Add column families
+    // Build a set of keys
+    List<ColumnFamilyDescriptorBuilder> familyBuilders = new ArrayList<>();
+    SortedMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor<ColumnFamilyDescriptorBuilder>() {
+      @Override
+      public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) {
+        ColumnFamilyDescriptorBuilder builder =
+          ColumnFamilyDescriptorBuilder.newBuilder(familyName);
+        familyBuilders.add(builder);
+        return builder;
+      }
+
+      @Override
+      public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus)
+          throws IOException {
+        Path hfile = hfileStatus.getPath();
+        try (HFile.Reader reader =
+          HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) {
+          if (builder.getCompressionType() != reader.getFileContext().getCompression()) {
+            builder.setCompressionType(reader.getFileContext().getCompression());
+            LOG.info("Setting compression " + reader.getFileContext().getCompression().name() +
+              " for family " + builder.getNameAsString());
+          }
+          reader.loadFileInfo();
+          byte[] first = reader.getFirstRowKey().get();
+          byte[] last = reader.getLastRowKey().get();
+
+          LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" +
+            Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
+
+          // To eventually infer start key-end key boundaries
+          Integer value = map.containsKey(first) ? map.get(first) : 0;
+          map.put(first, value + 1);
+
+          value = map.containsKey(last) ? map.get(last) : 0;
+          map.put(last, value - 1);
+        }
+      }
+    }, true);
+
+    byte[][] keys = inferBoundaries(map);
+    TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
+    familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build)
+      .forEachOrdered(tdBuilder::setColumnFamily);
+    FutureUtils.get(admin.createTable(tdBuilder.build(), keys));
+
+    LOG.info("Table " + tableName + " is available!!");
+  }
+
+  private Map<LoadQueueItem, ByteBuffer> performBulkLoad(AsyncClusterConnection conn,
+      TableName tableName, Deque<LoadQueueItem> queue, ExecutorService pool, boolean copyFile)
+      throws IOException {
+    int count = 0;
+
+    fsDelegationToken.acquireDelegationToken(queue.peek().getFilePath().getFileSystem(getConf()));
+    bulkToken = FutureUtils.get(conn.prepareBulkLoad(tableName));
+    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = null;
+
+    Map<LoadQueueItem, ByteBuffer> item2RegionMap = new HashMap<>();
+    // Assumes that region splits can happen while this occurs.
+    while (!queue.isEmpty()) {
+      // need to reload split keys each iteration.
+      final List<Pair<byte[], byte[]>> startEndKeys =
+        FutureUtils.get(conn.getRegionLocator(tableName).getStartEndKeys());
+      if (count != 0) {
+        LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " +
+          queue.size() + " files remaining to group or split");
+      }
+
+      int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
+      maxRetries = Math.max(maxRetries, startEndKeys.size() + 1);
+      if (maxRetries != 0 && count >= maxRetries) {
+        throw new IOException(
+          "Retry attempted " + count + " times without completing, bailing out");
+      }
+      count++;
+
+      // Using ByteBuffer for byte[] equality semantics
+      pair = groupOrSplitPhase(conn, tableName, pool, queue, startEndKeys);
+      Multimap<ByteBuffer, LoadQueueItem> regionGroups = pair.getFirst();
+
+      if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
+        // Error is logged inside checkHFilesCountPerRegionPerFamily.
+        throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily +
+          " hfiles to one family of one region");
+      }
+
+      bulkLoadPhase(conn, tableName, queue, regionGroups, copyFile, item2RegionMap);
+
+      // NOTE: The next iteration's split / group could happen in parallel to
+      // atomic bulkloads assuming that there are splits and no merges, and
+      // that we can atomically pull out the groups we want to retry.
+    }
+
+    if (!queue.isEmpty()) {
+      throw new RuntimeException(
+        "Bulk load aborted with some files not yet loaded." + "Please check log for more details.");
+    }
+    return item2RegionMap;
+  }
+
+  private void cleanup(AsyncClusterConnection conn, TableName tableName, Deque<LoadQueueItem> queue,
+      ExecutorService pool) throws IOException {
+    fsDelegationToken.releaseDelegationToken();
+    if (bulkToken != null) {
+      conn.cleanupBulkLoad(tableName, bulkToken);
+    }
+    if (pool != null) {
+      pool.shutdown();
+    }
+    if (!queue.isEmpty()) {
+      StringBuilder err = new StringBuilder();
+      err.append("-------------------------------------------------\n");
+      err.append("Bulk load aborted with some files not yet loaded:\n");
+      err.append("-------------------------------------------------\n");
+      for (LoadQueueItem q : queue) {
+        err.append("  ").append(q.getFilePath()).append('\n');
+      }
+      LOG.error(err.toString());
+    }
   }
 
-  private Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> convert(
-      Map<LoadIncrementalHFiles.LoadQueueItem, ByteBuffer> map) {
-    return map.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
+  /**
+   * Perform a bulk load of the given directory into the given pre-existing table. This method is
+   * not threadsafe.
+   * @param map map of family to List of hfiles
+   * @param tableName table to load the hfiles
+   * @param silence true to ignore unmatched column families
+   * @param copyFile always copy hfiles if true
+   * @throws TableNotFoundException if table does not yet exist
+   */
+  private Map<LoadQueueItem, ByteBuffer> doBulkLoad(AsyncClusterConnection conn,
+      TableName tableName, Map<byte[], List<Path>> map, boolean silence, boolean copyFile)
+      throws TableNotFoundException, IOException {
+    if (!FutureUtils.get(conn.getAdmin().isTableAvailable(tableName))) {
+      throw new TableNotFoundException("Table " + tableName + " is not currently available.");
+    }
+    // LQI queue does not need to be threadsafe -- all operations on this queue
+    // happen in this thread
+    Deque<LoadQueueItem> queue = new ArrayDeque<>();
+    ExecutorService pool = null;
+    try {
+      prepareHFileQueue(conn, tableName, map, queue, silence);
+      if (queue.isEmpty()) {
+        LOG.warn("Bulk load operation did not get any files to load");
+        return Collections.emptyMap();
+      }
+      pool = createExecutorService();
+      return performBulkLoad(conn, tableName, queue, pool, copyFile);
+    } finally {
+      cleanup(conn, tableName, queue, pool);
+    }
+  }
+
+  /**
+   * Perform a bulk load of the given directory into the given pre-existing table. This method is
+   * not threadsafe.
+   * @param tableName table to load the hfiles
+   * @param hfofDir the directory that was provided as the output path of a job using
+   *          HFileOutputFormat
+   * @param silence true to ignore unmatched column families
+   * @param copyFile always copy hfiles if true
+   * @throws TableNotFoundException if table does not yet exist
+   */
+  private Map<LoadQueueItem, ByteBuffer> doBulkLoad(AsyncClusterConnection conn,
+      TableName tableName, Path hfofDir, boolean silence, boolean copyFile)
+      throws TableNotFoundException, IOException {
+    if (!FutureUtils.get(conn.getAdmin().isTableAvailable(tableName))) {
+      throw new TableNotFoundException("Table " + tableName + " is not currently available.");
+    }
+
+    /*
+     * Checking hfile format is a time-consuming operation, we should have an option to skip this
+     * step when bulkloading millions of HFiles. See HBASE-13985.
+     */
+    boolean validateHFile = getConf().getBoolean("hbase.loadincremental.validate.hfile", true);
+    if (!validateHFile) {
+      LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " +
+        "are not correct. If you fail to read data from your table after using this " +
+        "option, consider removing the files and bulkload again without this option. " +
+        "See HBASE-13985");
+    }
+    // LQI queue does not need to be threadsafe -- all operations on this queue
+    // happen in this thread
+    Deque<LoadQueueItem> queue = new ArrayDeque<>();
+    ExecutorService pool = null;
+    try {
+      prepareHFileQueue(getConf(), conn, tableName, hfofDir, queue, validateHFile, silence);
+
+      if (queue.isEmpty()) {
+        LOG.warn(
+          "Bulk load operation did not find any files to load in directory {}. " +
+          "Does it contain files in subdirectories that correspond to column family names?",
+          (hfofDir != null ? hfofDir.toUri().toString() : ""));
+        return Collections.emptyMap();
+      }
+      pool = createExecutorService();
+      return performBulkLoad(conn, tableName, queue, pool, copyFile);
+    } finally {
+      cleanup(conn, tableName, queue, pool);
+    }
   }
 
   @Override
-  public Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> bulkLoad(TableName tableName,
+  public Map<LoadQueueItem, ByteBuffer> bulkLoad(TableName tableName,
       Map<byte[], List<Path>> family2Files) throws TableNotFoundException, IOException {
-    return convert(run(family2Files, tableName));
+    try (AsyncClusterConnection conn = ClusterConnectionFactory
+      .createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) {
+      if (!FutureUtils.get(conn.getAdmin().tableExists(tableName))) {
+        String errorMsg = format("Table '%s' does not exist.", tableName);
+        LOG.error(errorMsg);
+        throw new TableNotFoundException(errorMsg);
+      }
+      return doBulkLoad(conn, tableName, family2Files, isSilence(), isAlwaysCopyFiles());
+    }
   }
 
   @Override
-  public Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> bulkLoad(TableName tableName, Path dir)
+  public Map<LoadQueueItem, ByteBuffer> bulkLoad(TableName tableName, Path dir)
       throws TableNotFoundException, IOException {
-    return convert(run(dir, tableName));
+    try (AsyncClusterConnection conn = ClusterConnectionFactory
+      .createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) {
+      AsyncAdmin admin = conn.getAdmin();
+      if (!FutureUtils.get(admin.tableExists(tableName))) {
+        if (isCreateTable()) {
+          createTable(tableName, dir, admin);
+        } else {
+          String errorMsg = format("Table '%s' does not exist.", tableName);
+          LOG.error(errorMsg);
+          throw new TableNotFoundException(errorMsg);
+        }
+      }
+      return doBulkLoad(conn, tableName, dir, isSilence(), isAlwaysCopyFiles());
+    }
+  }
+
+  public void setBulkToken(String bulkToken) {
+    this.bulkToken = bulkToken;
+  }
+
+  private void usage() {
+    System.err.println("usage: " + "bin/hbase completebulkload <-Dargs> "
+        + "</path/to/hfileoutputformat-output> <tablename>\n"
+        + "\t-D" + CREATE_TABLE_CONF_KEY + "=no can be used to avoid creation "
+        + "of a table by this tool.\n"
+        + "\t Note: if you set this to 'no', then target table must already exist.\n"
+        + "\t-D" + IGNORE_UNMATCHED_CF_CONF_KEY + "=yes can be used to ignore "
+        + "unmatched column families.\n"
+        + "\t-loadTable switch implies your baseDirectory to store file has a "
+        + "depth of 3, table must exist\n"
+        + "\t and -loadTable switch is the last option on the command line.\n\n");
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    if (args.length != 2 && args.length != 3) {
+      usage();
+      return -1;
+    }
+    Path dirPath = new Path(args[0]);
+    TableName tableName = TableName.valueOf(args[1]);
+
+    if (args.length == 2) {
+      return !bulkLoad(tableName, dirPath).isEmpty() ? 0 : -1;
+    } else {
+      Map<byte[], List<Path>> family2Files = Maps.newHashMap();
+      FileSystem fs = FileSystem.get(getConf());
+      for (FileStatus regionDir : fs.listStatus(dirPath)) {
+        FSVisitor.visitRegionStoreFiles(fs, regionDir.getPath(), (region, family, hfileName) -> {
+          Path path = new Path(regionDir.getPath(), new Path(family, hfileName));
+          byte[] familyName = Bytes.toBytes(family);
+          if (family2Files.containsKey(familyName)) {
+            family2Files.get(familyName).add(path);
+          } else {
+            family2Files.put(familyName, Lists.newArrayList(path));
+          }
+        });
+      }
+      return !bulkLoad(tableName, family2Files).isEmpty() ? 0 : -1;
+    }
   }
 
   public static void main(String[] args) throws Exception {
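
For reference, the rewritten tool can still be driven programmatically using only the constructor and the bulkLoad(TableName, Path) method shown above. A minimal sketch; the directory and table name are placeholders, not part of the patch.

import java.nio.ByteBuffer;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles.LoadQueueItem;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Expects the usual HFileOutputFormat2 layout: <dir>/<family>/<hfile>.
    Path hfileDir = new Path("/tmp/example-hfiles"); // placeholder path
    TableName tableName = TableName.valueOf("example_table"); // placeholder table
    Map<LoadQueueItem, ByteBuffer> loaded =
      new BulkLoadHFilesTool(conf).bulkLoad(tableName, hfileDir);
    System.out.println("Bulk loaded " + loaded.size() + " hfile(s)");
  }
}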
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
deleted file mode 100644
index c981488..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
+++ /dev/null
@@ -1,1285 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.tool;
-
-import static java.lang.String.format;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.Collectors;
-import org.apache.commons.lang3.mutable.MutableInt;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClientServiceCallable;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-import org.apache.hadoop.hbase.client.SecureBulkLoadClient;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.HalfStoreFileReader;
-import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.token.FsDelegationToken;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSHDFSUtils;
-import org.apache.hadoop.hbase.util.FSVisitor;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Multimaps;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * Tool to load the output of HFileOutputFormat into an existing table.
- * @deprecated since 2.2.0, will be removed in 3.0.0. Use {@link BulkLoadHFiles} instead. Please
- *             rewrite your code if you rely on methods other than the {@link #run(Map, TableName)}
- *             and {@link #run(String, TableName)}, as all the methods other than them will be
- *             removed with no replacement.
- */
-@Deprecated
-@InterfaceAudience.Public
-public class LoadIncrementalHFiles extends Configured implements Tool {
-
-  private static final Logger LOG = LoggerFactory.getLogger(LoadIncrementalHFiles.class);
-
-  /**
-   * @deprecated since 2.2.0, will be removed in 3.0.0, with no replacement. End user should not
-   *             depend on this value.
-   */
-  @Deprecated
-  public static final String NAME = BulkLoadHFilesTool.NAME;
-  static final String RETRY_ON_IO_EXCEPTION = BulkLoadHFiles.RETRY_ON_IO_EXCEPTION;
-  public static final String MAX_FILES_PER_REGION_PER_FAMILY =
-    BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY;
-  private static final String ASSIGN_SEQ_IDS = BulkLoadHFiles.ASSIGN_SEQ_IDS;
-  public final static String CREATE_TABLE_CONF_KEY = BulkLoadHFiles.CREATE_TABLE_CONF_KEY;
-  public final static String IGNORE_UNMATCHED_CF_CONF_KEY =
-    BulkLoadHFiles.IGNORE_UNMATCHED_CF_CONF_KEY;
-  public final static String ALWAYS_COPY_FILES = BulkLoadHFiles.ALWAYS_COPY_FILES;
-
-  // We use a '.' prefix which is ignored when walking directory trees
-  // above. It is invalid family name.
-  static final String TMP_DIR = ".tmp";
-
-  private final int maxFilesPerRegionPerFamily;
-  private final boolean assignSeqIds;
-
-  // Source delegation token
-  private final FsDelegationToken fsDelegationToken;
-  private final UserProvider userProvider;
-  private final int nrThreads;
-  private AtomicInteger numRetries;
-  private final RpcControllerFactory rpcControllerFactory;
-
-  private String bulkToken;
-
-  /**
-   * Represents an HFile waiting to be loaded. An queue is used in this class in order to support
-   * the case where a region has split during the process of the load. When this happens, the HFile
-   * is split into two physical parts across the new region boundary, and each part is added back
-   * into the queue. The import process finishes when the queue is empty.
-   * @deprecated Use {@link BulkLoadHFiles} instead.
-   */
-  @InterfaceAudience.Public
-  @Deprecated
-  public static class LoadQueueItem extends BulkLoadHFiles.LoadQueueItem {
-
-    public LoadQueueItem(byte[] family, Path hfilePath) {
-      super(family, hfilePath);
-    }
-  }
-
-  public LoadIncrementalHFiles(Configuration conf) {
-    // make a copy, just to be sure we're not overriding someone else's config
-    super(HBaseConfiguration.create(conf));
-    conf = getConf();
-    // disable blockcache for tool invocation, see HBASE-10500
-    conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
-    userProvider = UserProvider.instantiate(conf);
-    fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
-    assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
-    maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
-    nrThreads = conf.getInt("hbase.loadincremental.threads.max",
-      Runtime.getRuntime().availableProcessors());
-    numRetries = new AtomicInteger(0);
-    rpcControllerFactory = new RpcControllerFactory(conf);
-  }
-
-  private void usage() {
-    System.err.println("usage: " + "bin/hbase completebulkload <-Dargs> "
-      + "</path/to/hfileoutputformat-output> <tablename>\n"
-      + "\t-D" + CREATE_TABLE_CONF_KEY + "=no can be used to avoid creation "
-      + "of a table by this tool.\n"
-      + "\t Note: if you set this to 'no', then target table must already exist.\n"
-      + "\t-D" + IGNORE_UNMATCHED_CF_CONF_KEY + "=yes can be used to ignore "
-      + "unmatched column families.\n"
-      + "\t-loadTable switch implies your baseDirectory to store file has a "
-      + "depth of 3, table must exist\n"
-      + "\t and -loadTable switch is the last option on the command line.\n\n");
-  }
-
-  /**
-   * Prepare a collection of {@link LoadQueueItem} from list of source hfiles contained in the
-   * passed directory and validates whether the prepared queue has all the valid table column
-   * families in it.
-   * @param hfilesDir directory containing list of hfiles to be loaded into the table
-   * @param table table to which hfiles should be loaded
-   * @param queue queue which needs to be loaded into the table
-   * @param validateHFile if true, hfiles will be validated for their format
-   * @throws IOException If any I/O or network error occurred
-   */
-  public void prepareHFileQueue(Path hfilesDir, Table table, Deque<LoadQueueItem> queue,
-      boolean validateHFile) throws IOException {
-    prepareHFileQueue(hfilesDir, table, queue, validateHFile, false);
-  }
-
-  /**
-   * Prepares a collection of {@link LoadQueueItem} from a list of source hfiles contained in the
-   * passed directory and validates whether the prepared queue has all the valid table column
-   * families in it.
-   * @param hfilesDir directory containing list of hfiles to be loaded into the table
-   * @param table table to which hfiles should be loaded
-   * @param queue queue which needs to be loaded into the table
-   * @param validateHFile if true, hfiles will be validated for their format
-   * @param silence true to ignore unmatched column families
-   * @throws IOException If any I/O or network error occurred
-   */
-  public void prepareHFileQueue(Path hfilesDir, Table table, Deque<LoadQueueItem> queue,
-      boolean validateHFile, boolean silence) throws IOException {
-    discoverLoadQueue(queue, hfilesDir, validateHFile);
-    validateFamiliesInHFiles(table, queue, silence);
-  }
-
-  /**
-   * Prepares a collection of {@link LoadQueueItem} from a list of source hfiles contained in the
-   * passed directory and validates whether the prepared queue has all the valid table column
-   * families in it.
-   * @param map map of family to List of hfiles
-   * @param table table to which hfiles should be loaded
-   * @param queue queue which needs to be loaded into the table
-   * @param silence true to ignore unmatched column families
-   * @throws IOException If any I/O or network error occurred
-   */
-  public void prepareHFileQueue(Map<byte[], List<Path>> map, Table table,
-      Deque<LoadQueueItem> queue, boolean silence) throws IOException {
-    populateLoadQueue(queue, map);
-    validateFamiliesInHFiles(table, queue, silence);
-  }
-
-  /**
-   * Perform a bulk load of the given directory into the given pre-existing table. This method is
-   * not threadsafe.
-   * @param hfofDir the directory that was provided as the output path of a job using
-   *          HFileOutputFormat
-   * @param admin the Admin
-   * @param table the table to load into
-   * @param regionLocator region locator
-   * @throws TableNotFoundException if table does not yet exist
-   */
-  public Map<LoadQueueItem, ByteBuffer> doBulkLoad(Path hfofDir, final Admin admin, Table table,
-      RegionLocator regionLocator) throws TableNotFoundException, IOException {
-    return doBulkLoad(hfofDir, admin, table, regionLocator, false, false);
-  }
-
-  /**
-   * Perform a bulk load of the given directory into the given pre-existing table. This method is
-   * not threadsafe.
-   * @param map map of family to List of hfiles
-   * @param admin the Admin
-   * @param table the table to load into
-   * @param regionLocator region locator
-   * @param silence true to ignore unmatched column families
-   * @param copyFile always copy hfiles if true
-   * @throws TableNotFoundException if table does not yet exist
-   */
-  public Map<LoadQueueItem, ByteBuffer> doBulkLoad(Map<byte[], List<Path>> map, final Admin admin,
-      Table table, RegionLocator regionLocator, boolean silence, boolean copyFile)
-      throws TableNotFoundException, IOException {
-    if (!admin.isTableAvailable(regionLocator.getName())) {
-      throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
-    }
-    // LQI queue does not need to be threadsafe -- all operations on this queue
-    // happen in this thread
-    Deque<LoadQueueItem> queue = new ArrayDeque<>();
-    ExecutorService pool = null;
-    SecureBulkLoadClient secureClient = null;
-    try {
-      prepareHFileQueue(map, table, queue, silence);
-      if (queue.isEmpty()) {
-        LOG.warn("Bulk load operation did not get any files to load");
-        return Collections.emptyMap();
-      }
-      pool = createExecutorService();
-      secureClient = new SecureBulkLoadClient(table.getConfiguration(), table);
-      return performBulkLoad(admin, table, regionLocator, queue, pool, secureClient, copyFile);
-    } finally {
-      cleanup(admin, queue, pool, secureClient);
-    }
-  }
-
-  /**
-   * Perform a bulk load of the given directory into the given pre-existing table. This method is
-   * not threadsafe.
-   * @param hfofDir the directory that was provided as the output path of a job using
-   *          HFileOutputFormat
-   * @param admin the Admin
-   * @param table the table to load into
-   * @param regionLocator region locator
-   * @param silence true to ignore unmatched column families
-   * @param copyFile always copy hfiles if true
-   * @throws TableNotFoundException if table does not yet exist
-   */
-  public Map<LoadQueueItem, ByteBuffer> doBulkLoad(Path hfofDir, final Admin admin, Table table,
-      RegionLocator regionLocator, boolean silence, boolean copyFile)
-      throws TableNotFoundException, IOException {
-    if (!admin.isTableAvailable(regionLocator.getName())) {
-      throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
-    }
-
-    /*
-     * Checking hfile format is a time-consuming operation, we should have an option to skip this
-     * step when bulkloading millions of HFiles. See HBASE-13985.
-     */
-    boolean validateHFile = getConf().getBoolean("hbase.loadincremental.validate.hfile", true);
-    if (!validateHFile) {
-      LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " +
-          "are not correct. If you fail to read data from your table after using this " +
-          "option, consider removing the files and bulkload again without this option. " +
-          "See HBASE-13985");
-    }
-    // LQI queue does not need to be threadsafe -- all operations on this queue
-    // happen in this thread
-    Deque<LoadQueueItem> queue = new ArrayDeque<>();
-    ExecutorService pool = null;
-    SecureBulkLoadClient secureClient = null;
-    try {
-      prepareHFileQueue(hfofDir, table, queue, validateHFile, silence);
-
-      if (queue.isEmpty()) {
-        LOG.warn(
-            "Bulk load operation did not find any files to load in directory {}. " +
-            "Does it contain files in subdirectories that correspond to column family names?",
-            (hfofDir != null ? hfofDir.toUri().toString() : ""));
-        return Collections.emptyMap();
-      }
-      pool = createExecutorService();
-      secureClient = new SecureBulkLoadClient(table.getConfiguration(), table);
-      return performBulkLoad(admin, table, regionLocator, queue, pool, secureClient, copyFile);
-    } finally {
-      cleanup(admin, queue, pool, secureClient);
-    }
-  }
-
-  /**
-   * Used by the replication sink to load the hfiles from the source cluster. It does the following:
-   * <ol>
-   * <li>LoadIncrementalHFiles#groupOrSplitPhase(Table, ExecutorService, Deque, Pair)</li>
-   * <li>LoadIncrementalHFiles#bulkLoadPhase(Table, Connection, ExecutorService, Deque, Multimap)
-   * </li>
-   * </ol>
-   * @param table Table to which these hfiles should be loaded to
-   * @param conn Connection to use
-   * @param queue {@link LoadQueueItem} has hfiles yet to be loaded
-   * @param startEndKeys starting and ending row keys of the region
-   */
-  public void loadHFileQueue(Table table, Connection conn, Deque<LoadQueueItem> queue,
-      Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-    loadHFileQueue(table, conn, queue, startEndKeys, false);
-  }
-
-  /**
-   * Used by the replication sink to load the hfiles from the source cluster. It does the following:
-   * <ol>
-   * <li>LoadIncrementalHFiles#groupOrSplitPhase(Table, ExecutorService, Deque, Pair)</li>
-   * <li>LoadIncrementalHFiles#bulkLoadPhase(Table, Connection, ExecutorService, Deque, Multimap)
-   * </li>
-   * </ol>
-   * @param table Table to which these hfiles should be loaded to
-   * @param conn Connection to use
-   * @param queue {@link LoadQueueItem} has hfiles yet to be loaded
-   * @param startEndKeys starting and ending row keys of the region
-   */
-  public void loadHFileQueue(Table table, Connection conn, Deque<LoadQueueItem> queue,
-      Pair<byte[][], byte[][]> startEndKeys, boolean copyFile) throws IOException {
-    ExecutorService pool = null;
-    try {
-      pool = createExecutorService();
-      Multimap<ByteBuffer, LoadQueueItem> regionGroups =
-          groupOrSplitPhase(table, pool, queue, startEndKeys).getFirst();
-      bulkLoadPhase(table, conn, pool, queue, regionGroups, copyFile, null);
-    } finally {
-      if (pool != null) {
-        pool.shutdown();
-      }
-    }
-  }
-
-  private Map<LoadQueueItem, ByteBuffer> performBulkLoad(Admin admin, Table table,
-      RegionLocator regionLocator, Deque<LoadQueueItem> queue, ExecutorService pool,
-      SecureBulkLoadClient secureClient, boolean copyFile) throws IOException {
-    int count = 0;
-
-    fsDelegationToken.acquireDelegationToken(queue.peek().getFilePath().getFileSystem(getConf()));
-    bulkToken = secureClient.prepareBulkLoad(admin.getConnection());
-    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = null;
-
-    Map<LoadQueueItem, ByteBuffer> item2RegionMap = new HashMap<>();
-    // Assumes that region splits can happen while this occurs.
-    while (!queue.isEmpty()) {
-      // need to reload split keys each iteration.
-      final Pair<byte[][], byte[][]> startEndKeys = regionLocator.getStartEndKeys();
-      if (count != 0) {
-        LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " +
-            queue.size() + " files remaining to group or split");
-      }
-
-      int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
-      maxRetries = Math.max(maxRetries, startEndKeys.getFirst().length + 1);
-      if (maxRetries != 0 && count >= maxRetries) {
-        throw new IOException(
-            "Retry attempted " + count + " times without completing, bailing out");
-      }
-      count++;
-
-      // Using ByteBuffer for byte[] equality semantics
-      pair = groupOrSplitPhase(table, pool, queue, startEndKeys);
-      Multimap<ByteBuffer, LoadQueueItem> regionGroups = pair.getFirst();
-
-      if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
-        // Error is logged inside checkHFilesCountPerRegionPerFamily.
-        throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily +
-            " hfiles to one family of one region");
-      }
-
-      bulkLoadPhase(table, admin.getConnection(), pool, queue, regionGroups, copyFile,
-        item2RegionMap);
-
-      // NOTE: The next iteration's split / group could happen in parallel to
-      // atomic bulkloads assuming that there are splits and no merges, and
-      // that we can atomically pull out the groups we want to retry.
-    }
-
-    if (!queue.isEmpty()) {
-      throw new RuntimeException("Bulk load aborted with some files not yet loaded." +
-          "Please check log for more details.");
-    }
-    return item2RegionMap;
-  }
-
-  /**
-   * This takes the LQI's grouped by likely regions and attempts to bulk load them. Any failures are
-   * re-queued for another pass with the groupOrSplitPhase.
-   * <p>
-   * protected for testing.
-   */
-  @VisibleForTesting
-  protected void bulkLoadPhase(Table table, Connection conn, ExecutorService pool,
-      Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
-      boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
-    // atomically bulk load the groups.
-    Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<>();
-    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap()
-        .entrySet()) {
-      byte[] first = e.getKey().array();
-      Collection<LoadQueueItem> lqis = e.getValue();
-
-      ClientServiceCallable<byte[]> serviceCallable =
-          buildClientServiceCallable(conn, table.getName(), first, lqis, copyFile);
-
-      Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
-        @Override
-        public List<LoadQueueItem> call() throws Exception {
-          List<LoadQueueItem> toRetry =
-              tryAtomicRegionLoad(serviceCallable, table.getName(), first, lqis);
-          return toRetry;
-        }
-      };
-      if (item2RegionMap != null) {
-        for (LoadQueueItem lqi : lqis) {
-          item2RegionMap.put(lqi, e.getKey());
-        }
-      }
-      loadingFutures.add(pool.submit(call));
-    }
-
-    // get all the results.
-    for (Future<List<LoadQueueItem>> future : loadingFutures) {
-      try {
-        List<LoadQueueItem> toRetry = future.get();
-
-        if (item2RegionMap != null) {
-          for (LoadQueueItem lqi : toRetry) {
-            item2RegionMap.remove(lqi);
-          }
-        }
-        // LQIs that are requeued to be regrouped.
-        queue.addAll(toRetry);
-
-      } catch (ExecutionException e1) {
-        Throwable t = e1.getCause();
-        if (t instanceof IOException) {
-          // At this point something unrecoverable has happened.
-          // TODO Implement bulk load recovery
-          throw new IOException("BulkLoad encountered an unrecoverable problem", t);
-        }
-        LOG.error("Unexpected execution exception during bulk load", e1);
-        throw new IllegalStateException(t);
-      } catch (InterruptedException e1) {
-        LOG.error("Unexpected interrupted exception during bulk load", e1);
-        throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
-      }
-    }
-  }
-
-  @VisibleForTesting
-  protected ClientServiceCallable<byte[]> buildClientServiceCallable(Connection conn,
-      TableName tableName, byte[] first, Collection<LoadQueueItem> lqis, boolean copyFile) {
-    List<Pair<byte[], String>> famPaths =
-        lqis.stream().map(lqi -> Pair.newPair(lqi.getFamily(), lqi.getFilePath().toString()))
-            .collect(Collectors.toList());
-    return new ClientServiceCallable<byte[]>(conn, tableName, first,
-        rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) {
-      @Override
-      protected byte[] rpcCall() throws Exception {
-        SecureBulkLoadClient secureClient = null;
-        boolean success = false;
-        try {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Going to connect to server " + getLocation() + " for row " +
-                Bytes.toStringBinary(getRow()) + " with hfile group " +
-                LoadIncrementalHFiles.this.toString(famPaths));
-          }
-          byte[] regionName = getLocation().getRegionInfo().getRegionName();
-          try (Table table = conn.getTable(getTableName())) {
-            secureClient = new SecureBulkLoadClient(getConf(), table);
-            success = secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName,
-              assignSeqIds, fsDelegationToken.getUserToken(), bulkToken, copyFile);
-          }
-          return success ? regionName : null;
-        } finally {
-          // Best effort copying of files that might not have been imported
-          // from the staging directory back to original location
-          // in user directory
-          if (secureClient != null && !success) {
-            FileSystem targetFs = FileSystem.get(getConf());
-            FileSystem sourceFs = lqis.iterator().next().getFilePath().getFileSystem(getConf());
-            // Check to see if the source and target filesystems are the same
-            // If they are the same filesystem, we will try move the files back
-            // because previously we moved them to the staging directory.
-            if (FSHDFSUtils.isSameHdfs(getConf(), sourceFs, targetFs)) {
-              for (Pair<byte[], String> el : famPaths) {
-                Path hfileStagingPath = null;
-                Path hfileOrigPath = new Path(el.getSecond());
-                try {
-                  hfileStagingPath = new Path(new Path(bulkToken, Bytes.toString(el.getFirst())),
-                      hfileOrigPath.getName());
-                  if (targetFs.rename(hfileStagingPath, hfileOrigPath)) {
-                    LOG.debug("Moved back file " + hfileOrigPath + " from " + hfileStagingPath);
-                  } else if (targetFs.exists(hfileStagingPath)) {
-                    LOG.debug(
-                      "Unable to move back file " + hfileOrigPath + " from " + hfileStagingPath);
-                  }
-                } catch (Exception ex) {
-                  LOG.debug(
-                    "Unable to move back file " + hfileOrigPath + " from " + hfileStagingPath, ex);
-                }
-              }
-            }
-          }
-        }
-      }
-    };
-  }
-
-  private boolean checkHFilesCountPerRegionPerFamily(
-      final Multimap<ByteBuffer, LoadQueueItem> regionGroups) {
-    for (Map.Entry<ByteBuffer, Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
-      Map<byte[], MutableInt> filesMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      for (LoadQueueItem lqi : e.getValue()) {
-        MutableInt count = filesMap.computeIfAbsent(lqi.getFamily(), k -> new MutableInt());
-        count.increment();
-        if (count.intValue() > maxFilesPerRegionPerFamily) {
-          LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily +
-              " hfiles to family " + Bytes.toStringBinary(lqi.getFamily()) +
-              " of region with start key " + Bytes.toStringBinary(e.getKey()));
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  /**
-   * @param table the table to load into
-   * @param pool the ExecutorService
-   * @param queue the queue for LoadQueueItem
-   * @param startEndKeys start and end keys
-   * @return A map that groups LQIs by likely bulk load region targets and a Set of missing hfiles.
-   */
-  private Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> groupOrSplitPhase(
-      final Table table, ExecutorService pool, Deque<LoadQueueItem> queue,
-      final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-    // <region start key, LQI> need synchronized only within this scope of this
-    // phase because of the puts that happen in futures.
-    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
-    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
-    Set<String> missingHFiles = new HashSet<>();
-    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair =
-        new Pair<>(regionGroups, missingHFiles);
-
-    // drain LQIs and figure out bulk load groups
-    Set<Future<Pair<List<LoadQueueItem>, String>>> splittingFutures = new HashSet<>();
-    while (!queue.isEmpty()) {
-      final LoadQueueItem item = queue.remove();
-
-      final Callable<Pair<List<LoadQueueItem>, String>> call =
-          new Callable<Pair<List<LoadQueueItem>, String>>() {
-            @Override
-            public Pair<List<LoadQueueItem>, String> call() throws Exception {
-              Pair<List<LoadQueueItem>, String> splits =
-                  groupOrSplit(regionGroups, item, table, startEndKeys);
-              return splits;
-            }
-          };
-      splittingFutures.add(pool.submit(call));
-    }
-    // get all the results. All grouping and splitting must finish before
-    // we can attempt the atomic loads.
-    for (Future<Pair<List<LoadQueueItem>, String>> lqis : splittingFutures) {
-      try {
-        Pair<List<LoadQueueItem>, String> splits = lqis.get();
-        if (splits != null) {
-          if (splits.getFirst() != null) {
-            queue.addAll(splits.getFirst());
-          } else {
-            missingHFiles.add(splits.getSecond());
-          }
-        }
-      } catch (ExecutionException e1) {
-        Throwable t = e1.getCause();
-        if (t instanceof IOException) {
-          LOG.error("IOException during splitting", e1);
-          throw (IOException) t; // would have been thrown if not parallelized,
-        }
-        LOG.error("Unexpected execution exception during splitting", e1);
-        throw new IllegalStateException(t);
-      } catch (InterruptedException e1) {
-        LOG.error("Unexpected interrupted exception during splitting", e1);
-        throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
-      }
-    }
-    return pair;
-  }
-
-  private List<LoadQueueItem> splitStoreFile(final LoadQueueItem item, final Table table,
-      byte[] startKey, byte[] splitKey) throws IOException {
-    Path hfilePath = item.getFilePath();
-    byte[] family = item.getFamily();
-    Path tmpDir = hfilePath.getParent();
-    if (!tmpDir.getName().equals(TMP_DIR)) {
-      tmpDir = new Path(tmpDir, TMP_DIR);
-    }
-
-    LOG.info("HFile at " + hfilePath + " no longer fits inside a single " + "region. Splitting...");
-
-    String uniqueName = getUniqueName();
-    ColumnFamilyDescriptor familyDesc = table.getDescriptor().getColumnFamily(family);
-
-    Path botOut = new Path(tmpDir, uniqueName + ".bottom");
-    Path topOut = new Path(tmpDir, uniqueName + ".top");
-    splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut);
-
-    FileSystem fs = tmpDir.getFileSystem(getConf());
-    fs.setPermission(tmpDir, FsPermission.valueOf("-rwxrwxrwx"));
-    fs.setPermission(botOut, FsPermission.valueOf("-rwxrwxrwx"));
-    fs.setPermission(topOut, FsPermission.valueOf("-rwxrwxrwx"));
-
-    // Add these back at the *front* of the queue, so there's a lower
-    // chance that the region will just split again before we get there.
-    List<LoadQueueItem> lqis = new ArrayList<>(2);
-    lqis.add(new LoadQueueItem(family, botOut));
-    lqis.add(new LoadQueueItem(family, topOut));
-
-    // If the current item is already the result of previous splits,
-    // we don't need it anymore. Clean up to save space.
-    // It is not part of the original input files.
-    try {
-      if (tmpDir.getName().equals(TMP_DIR)) {
-        fs.delete(hfilePath, false);
-      }
-    } catch (IOException e) {
-      LOG.warn("Unable to delete temporary split file " + hfilePath);
-    }
-    LOG.info("Successfully split into new HFiles " + botOut + " and " + topOut);
-    return lqis;
-  }
-
-  /**
-   * Attempt to assign the given load queue item into its target region group. If the hfile boundary
-   * no longer fits into a region, physically splits the hfile such that the new bottom half will
-   * fit and returns the list of LQI's corresponding to the resultant hfiles.
-   * <p>
-   * protected for testing
-   * @throws IOException if an IO failure is encountered
-   */
-  @VisibleForTesting
-  protected Pair<List<LoadQueueItem>, String> groupOrSplit(
-      Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table table,
-      final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-    Path hfilePath = item.getFilePath();
-    Optional<byte[]> first, last;
-    try (HFile.Reader hfr = HFile.createReader(hfilePath.getFileSystem(getConf()), hfilePath,
-      CacheConfig.DISABLED, true, getConf())) {
-      hfr.loadFileInfo();
-      first = hfr.getFirstRowKey();
-      last = hfr.getLastRowKey();
-    } catch (FileNotFoundException fnfe) {
-      LOG.debug("encountered", fnfe);
-      return new Pair<>(null, hfilePath.getName());
-    }
-
-    LOG.info("Trying to load hfile=" + hfilePath + " first=" + first.map(Bytes::toStringBinary) +
-        " last=" + last.map(Bytes::toStringBinary));
-    if (!first.isPresent() || !last.isPresent()) {
-      assert !first.isPresent() && !last.isPresent();
-      // TODO what if this is due to a bad HFile?
-      LOG.info("hfile " + hfilePath + " has no entries, skipping");
-      return null;
-    }
-    if (Bytes.compareTo(first.get(), last.get()) > 0) {
-      throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(first.get()) +
-          " > " + Bytes.toStringBinary(last.get()));
-    }
-    int idx = Arrays.binarySearch(startEndKeys.getFirst(), first.get(), Bytes.BYTES_COMPARATOR);
-    if (idx < 0) {
-      // not on boundary, returns -(insertion index). Calculate region it
-      // would be in.
-      idx = -(idx + 1) - 1;
-    }
-    int indexForCallable = idx;
-
-    /**
-     * We can consider there is a region hole in the following conditions. 1) if idx < 0, then the
-     * first region info is lost. 2) if the endkey of a region is not equal to the startkey of the
-     * next region. 3) if the endkey of the last region is not empty.
-     */
-    if (indexForCallable < 0) {
-      throw new IOException("The first region info for table " + table.getName() +
-          " can't be found in hbase:meta.Please use hbck tool to fix it first.");
-    } else if ((indexForCallable == startEndKeys.getFirst().length - 1) &&
-        !Bytes.equals(startEndKeys.getSecond()[indexForCallable], HConstants.EMPTY_BYTE_ARRAY)) {
-      throw new IOException("The last region info for table " + table.getName() +
-          " can't be found in hbase:meta.Please use hbck tool to fix it first.");
-    } else if (indexForCallable + 1 < startEndKeys.getFirst().length &&
-        !(Bytes.compareTo(startEndKeys.getSecond()[indexForCallable],
-          startEndKeys.getFirst()[indexForCallable + 1]) == 0)) {
-      throw new IOException("The endkey of one region for table " + table.getName() +
-          " is not equal to the startkey of the next region in hbase:meta." +
-          "Please use hbck tool to fix it first.");
-    }
-
-    boolean lastKeyInRange = Bytes.compareTo(last.get(), startEndKeys.getSecond()[idx]) < 0 ||
-        Bytes.equals(startEndKeys.getSecond()[idx], HConstants.EMPTY_BYTE_ARRAY);
-    if (!lastKeyInRange) {
-      List<LoadQueueItem> lqis = splitStoreFile(item, table,
-        startEndKeys.getFirst()[indexForCallable], startEndKeys.getSecond()[indexForCallable]);
-      return new Pair<>(lqis, null);
-    }
-
-    // group regions.
-    regionGroups.put(ByteBuffer.wrap(startEndKeys.getFirst()[idx]), item);
-    return null;
-  }
-
-  /**
-   * Attempts to do an atomic load of many hfiles into a region. If it fails, it returns a list of
-   * hfiles that need to be retried. If it is successful it will return an empty list.
-   * <p>
-   * NOTE: To maintain row atomicity guarantees, the region server callable should succeed
-   * atomically and fail atomically.
-   * <p>
-   * Protected for testing.
-   * @return empty list if success, list of items to retry on recoverable failure
-   */
-  @VisibleForTesting
-  protected List<LoadQueueItem> tryAtomicRegionLoad(ClientServiceCallable<byte[]> serviceCallable,
-      final TableName tableName, final byte[] first, final Collection<LoadQueueItem> lqis)
-      throws IOException {
-    List<LoadQueueItem> toRetry = new ArrayList<>();
-    try {
-      Configuration conf = getConf();
-      byte[] region = RpcRetryingCallerFactory.instantiate(conf, null).<byte[]> newCaller()
-          .callWithRetries(serviceCallable, Integer.MAX_VALUE);
-      if (region == null) {
-        LOG.warn("Attempt to bulk load region containing " + Bytes.toStringBinary(first) +
-            " into table " + tableName + " with files " + lqis +
-            " failed.  This is recoverable and they will be retried.");
-        toRetry.addAll(lqis); // return lqi's to retry
-      }
-      // success
-      return toRetry;
-    } catch (IOException e) {
-      LOG.error("Encountered unrecoverable error from region server, additional details: " +
-                      serviceCallable.getExceptionMessageAdditionalDetail(),
-              e);
-      LOG.warn(
-              "Received a " + e.getClass().getSimpleName()
-                      + " from region server: "
-                      + serviceCallable.getExceptionMessageAdditionalDetail(), e);
-      if (getConf().getBoolean(RETRY_ON_IO_EXCEPTION, false)
-              && numRetries.get() < getConf().getInt(
-              HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-              HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER)) {
-        LOG.warn("Will attempt to retry loading failed HFiles. Retry #"
-                + numRetries.incrementAndGet());
-        toRetry.addAll(lqis);
-        return toRetry;
-      }
-      LOG.error(RETRY_ON_IO_EXCEPTION + " is disabled. Unable to recover");
-      throw e;
-    }
-  }
-
-  /**
-   * If the table is created for the first time, then "completebulkload" reads the files twice.
-   * More modifications are necessary if we want to avoid doing this.
-   */
-  private void createTable(TableName tableName, Path hfofDir, Admin admin) throws IOException {
-    final FileSystem fs = hfofDir.getFileSystem(getConf());
-
-    // Add column families
-    // Build a set of keys
-    List<ColumnFamilyDescriptorBuilder> familyBuilders = new ArrayList<>();
-    SortedMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor<ColumnFamilyDescriptorBuilder>() {
-      @Override
-      public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) {
-        ColumnFamilyDescriptorBuilder builder =
-            ColumnFamilyDescriptorBuilder.newBuilder(familyName);
-        familyBuilders.add(builder);
-        return builder;
-      }
-
-      @Override
-      public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus)
-          throws IOException {
-        Path hfile = hfileStatus.getPath();
-        try (HFile.Reader reader =
-            HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) {
-          if (builder.getCompressionType() != reader.getFileContext().getCompression()) {
-            builder.setCompressionType(reader.getFileContext().getCompression());
-            LOG.info("Setting compression " + reader.getFileContext().getCompression().name() +
-                " for family " + builder.getNameAsString());
-          }
-          reader.loadFileInfo();
-          byte[] first = reader.getFirstRowKey().get();
-          byte[] last = reader.getLastRowKey().get();
-
-          LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" +
-              Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
-
-          // To eventually infer start key-end key boundaries
-          Integer value = map.containsKey(first) ? map.get(first) : 0;
-          map.put(first, value + 1);
-
-          value = map.containsKey(last) ? map.get(last) : 0;
-          map.put(last, value - 1);
-        }
-      }
-    });
-
-    byte[][] keys = inferBoundaries(map);
-    TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
-    familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build)
-        .forEachOrdered(tdBuilder::setColumnFamily);
-    admin.createTable(tdBuilder.build(), keys);
-
-    LOG.info("Table " + tableName + " is available!!");
-  }
-
-  private void cleanup(Admin admin, Deque<LoadQueueItem> queue, ExecutorService pool,
-      SecureBulkLoadClient secureClient) throws IOException {
-    fsDelegationToken.releaseDelegationToken();
-    if (bulkToken != null && secureClient != null) {
-      secureClient.cleanupBulkLoad(admin.getConnection(), bulkToken);
-    }
-    if (pool != null) {
-      pool.shutdown();
-    }
-    if (!queue.isEmpty()) {
-      StringBuilder err = new StringBuilder();
-      err.append("-------------------------------------------------\n");
-      err.append("Bulk load aborted with some files not yet loaded:\n");
-      err.append("-------------------------------------------------\n");
-      for (LoadQueueItem q : queue) {
-        err.append("  ").append(q.getFilePath()).append('\n');
-      }
-      LOG.error(err.toString());
-    }
-  }
-
-  // unique file name for the table
-  private String getUniqueName() {
-    return UUID.randomUUID().toString().replaceAll("-", "");
-  }
-
-  /**
-   * Checks whether there is any invalid family name in HFiles to be bulk loaded.
-   */
-  private void validateFamiliesInHFiles(Table table, Deque<LoadQueueItem> queue, boolean silence)
-      throws IOException {
-    Set<String> familyNames = Arrays.asList(table.getDescriptor().getColumnFamilies()).stream()
-        .map(f -> f.getNameAsString()).collect(Collectors.toSet());
-    List<String> unmatchedFamilies = queue.stream().map(item -> Bytes.toString(item.getFamily()))
-        .filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList());
-    if (unmatchedFamilies.size() > 0) {
-      String msg =
-          "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " +
-              unmatchedFamilies + "; valid family names of table " + table.getName() + " are: " +
-              familyNames;
-      LOG.error(msg);
-      if (!silence) {
-        throw new IOException(msg);
-      }
-    }
-  }
-
-  /**
-   * Populate the Queue with given HFiles
-   */
-  private void populateLoadQueue(Deque<LoadQueueItem> ret, Map<byte[], List<Path>> map) {
-    map.forEach((k, v) -> v.stream().map(p -> new LoadQueueItem(k, p)).forEachOrdered(ret::add));
-  }
-
-  /**
-   * Walk the given directory for all HFiles, and return a Queue containing all such files.
-   */
-  private void discoverLoadQueue(final Deque<LoadQueueItem> ret, final Path hfofDir,
-      final boolean validateHFile) throws IOException {
-    visitBulkHFiles(hfofDir.getFileSystem(getConf()), hfofDir, new BulkHFileVisitor<byte[]>() {
-      @Override
-      public byte[] bulkFamily(final byte[] familyName) {
-        return familyName;
-      }
-
-      @Override
-      public void bulkHFile(final byte[] family, final FileStatus hfile) throws IOException {
-        long length = hfile.getLen();
-        if (length > getConf().getLong(HConstants.HREGION_MAX_FILESIZE,
-          HConstants.DEFAULT_MAX_FILE_SIZE)) {
-          LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length +
-              " bytes can be problematic as it may lead to oversplitting.");
-        }
-        ret.add(new LoadQueueItem(family, hfile.getPath()));
-      }
-    }, validateHFile);
-  }
-
-  private interface BulkHFileVisitor<TFamily> {
-
-    TFamily bulkFamily(byte[] familyName) throws IOException;
-
-    void bulkHFile(TFamily family, FileStatus hfileStatus) throws IOException;
-  }
-
-  /**
-   * Iterate over the bulkDir hfiles. Skip reference, HFileLink, files starting with "_" and
-   * non-valid hfiles.
-   */
-  private static <TFamily> void visitBulkHFiles(final FileSystem fs, final Path bulkDir,
-      final BulkHFileVisitor<TFamily> visitor) throws IOException {
-    visitBulkHFiles(fs, bulkDir, visitor, true);
-  }
-
-  /**
-   * Iterate over the bulkDir hfiles. Skip reference, HFileLink, files starting with "_". Check and
-   * skip non-valid hfiles by default, or skip this validation by setting
-   * 'hbase.loadincremental.validate.hfile' to false.
-   */
-  private static <TFamily> void visitBulkHFiles(FileSystem fs, Path bulkDir,
-      BulkHFileVisitor<TFamily> visitor, boolean validateHFile) throws IOException {
-    FileStatus[] familyDirStatuses = fs.listStatus(bulkDir);
-    for (FileStatus familyStat : familyDirStatuses) {
-      if (!familyStat.isDirectory()) {
-        LOG.warn("Skipping non-directory " + familyStat.getPath());
-        continue;
-      }
-      Path familyDir = familyStat.getPath();
-      byte[] familyName = Bytes.toBytes(familyDir.getName());
-      // Skip invalid family
-      try {
-        ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(familyName);
-      } catch (IllegalArgumentException e) {
-        LOG.warn("Skipping invalid " + familyStat.getPath());
-        continue;
-      }
-      TFamily family = visitor.bulkFamily(familyName);
-
-      FileStatus[] hfileStatuses = fs.listStatus(familyDir);
-      for (FileStatus hfileStatus : hfileStatuses) {
-        if (!fs.isFile(hfileStatus.getPath())) {
-          LOG.warn("Skipping non-file " + hfileStatus);
-          continue;
-        }
-
-        Path hfile = hfileStatus.getPath();
-        // Skip "_", reference, HFileLink
-        String fileName = hfile.getName();
-        if (fileName.startsWith("_")) {
-          continue;
-        }
-        if (StoreFileInfo.isReference(fileName)) {
-          LOG.warn("Skipping reference " + fileName);
-          continue;
-        }
-        if (HFileLink.isHFileLink(fileName)) {
-          LOG.warn("Skipping HFileLink " + fileName);
-          continue;
-        }
-
-        // Validate HFile Format if needed
-        if (validateHFile) {
-          try {
-            if (!HFile.isHFileFormat(fs, hfile)) {
-              LOG.warn("the file " + hfile + " doesn't seems to be an hfile. skipping");
-              continue;
-            }
-          } catch (FileNotFoundException e) {
-            LOG.warn("the file " + hfile + " was removed");
-            continue;
-          }
-        }
-
-        visitor.bulkHFile(family, hfileStatus);
-      }
-    }
-  }
-
-  // Initialize a thread pool
-  private ExecutorService createExecutorService() {
-    ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
-        new LinkedBlockingQueue<>(),
-        new ThreadFactoryBuilder().setNameFormat("LoadIncrementalHFiles-%1$d").build());
-    pool.allowCoreThreadTimeOut(true);
-    return pool;
-  }
-
-  private final String toString(List<Pair<byte[], String>> list) {
-    StringBuilder sb = new StringBuilder();
-    sb.append('[');
-    list.forEach(p -> {
-      sb.append('{').append(Bytes.toStringBinary(p.getFirst())).append(',').append(p.getSecond())
-          .append('}');
-    });
-    sb.append(']');
-    return sb.toString();
-  }
-
-  /**
-   * Split a storefile into a top and bottom half, maintaining the metadata, recreating bloom
-   * filters, etc.
-   */
-  @VisibleForTesting
-  static void splitStoreFile(Configuration conf, Path inFile, ColumnFamilyDescriptor familyDesc,
-      byte[] splitKey, Path bottomOut, Path topOut) throws IOException {
-    // Open reader with no block cache, and not in-memory
-    Reference topReference = Reference.createTopReference(splitKey);
-    Reference bottomReference = Reference.createBottomReference(splitKey);
-
-    copyHFileHalf(conf, inFile, topOut, topReference, familyDesc);
-    copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc);
-  }
-
-  /**
-   * Copy half of an HFile into a new HFile.
-   */
-  private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile,
-      Reference reference, ColumnFamilyDescriptor familyDescriptor) throws IOException {
-    FileSystem fs = inFile.getFileSystem(conf);
-    CacheConfig cacheConf = CacheConfig.DISABLED;
-    HalfStoreFileReader halfReader = null;
-    StoreFileWriter halfWriter = null;
-    try {
-      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, true,
-          new AtomicInteger(0), true, conf);
-      Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
-
-      int blocksize = familyDescriptor.getBlocksize();
-      Algorithm compression = familyDescriptor.getCompressionType();
-      BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
-      HFileContext hFileContext = new HFileContextBuilder().withCompression(compression)
-          .withChecksumType(HStore.getChecksumType(conf))
-          .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blocksize)
-          .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true)
-          .build();
-      halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
-          .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
-      HFileScanner scanner = halfReader.getScanner(false, false, false);
-      scanner.seekTo();
-      do {
-        halfWriter.append(scanner.getCell());
-      } while (scanner.next());
-
-      for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
-        if (shouldCopyHFileMetaKey(entry.getKey())) {
-          halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
-        }
-      }
-    } finally {
-      if (halfReader != null) {
-        try {
-          halfReader.close(cacheConf.shouldEvictOnClose());
-        } catch (IOException e) {
-          LOG.warn("failed to close hfile reader for " + inFile, e);
-        }
-      }
-      if (halfWriter != null) {
-        halfWriter.close();
-      }
-
-    }
-  }
-
-  private static boolean shouldCopyHFileMetaKey(byte[] key) {
-    // skip encoding to keep hfile meta consistent with data block info, see HBASE-15085
-    if (Bytes.equals(key, HFileDataBlockEncoder.DATA_BLOCK_ENCODING)) {
-      return false;
-    }
-
-    return !HFile.isReservedFileInfoKey(key);
-  }
-
-  private boolean isCreateTable() {
-    return "yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"));
-  }
-
-  private boolean isSilence() {
-    return "yes".equalsIgnoreCase(getConf().get(IGNORE_UNMATCHED_CF_CONF_KEY, ""));
-  }
-
-  private boolean isAlwaysCopyFiles() {
-    return getConf().getBoolean(ALWAYS_COPY_FILES, false);
-  }
-
-  protected final Map<LoadQueueItem, ByteBuffer> run(Path hfofDir, TableName tableName)
-      throws IOException {
-    try (Connection connection = ConnectionFactory.createConnection(getConf());
-        Admin admin = connection.getAdmin()) {
-      if (!admin.tableExists(tableName)) {
-        if (isCreateTable()) {
-          createTable(tableName, hfofDir, admin);
-        } else {
-          String errorMsg = format("Table '%s' does not exist.", tableName);
-          LOG.error(errorMsg);
-          throw new TableNotFoundException(errorMsg);
-        }
-      }
-      try (Table table = connection.getTable(tableName);
-          RegionLocator locator = connection.getRegionLocator(tableName)) {
-        return doBulkLoad(hfofDir, admin, table, locator, isSilence(),
-            isAlwaysCopyFiles());
-      }
-    }
-  }
-  /**
-   * Perform bulk load on the given table.
-   * @param hfofDir the directory that was provided as the output path of a job using
-   *          HFileOutputFormat
-   * @param tableName the table to load into
-   */
-  public Map<LoadQueueItem, ByteBuffer> run(String hfofDir, TableName tableName)
-      throws IOException {
-    return run(new Path(hfofDir), tableName);
-  }
-
-  /**
-   * Perform bulk load on the given table.
-   * @param family2Files map of family to List of hfiles
-   * @param tableName the table to load into
-   */
-  public Map<LoadQueueItem, ByteBuffer> run(Map<byte[], List<Path>> family2Files,
-      TableName tableName) throws IOException {
-    try (Connection connection = ConnectionFactory.createConnection(getConf());
-        Admin admin = connection.getAdmin()) {
-      if (!admin.tableExists(tableName)) {
-        String errorMsg = format("Table '%s' does not exist.", tableName);
-        LOG.error(errorMsg);
-        throw new TableNotFoundException(errorMsg);
-      }
-      try (Table table = connection.getTable(tableName);
-          RegionLocator locator = connection.getRegionLocator(tableName)) {
-        return doBulkLoad(family2Files, admin, table, locator, isSilence(), isAlwaysCopyFiles());
-      }
-    }
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    if (args.length != 2 && args.length != 3) {
-      usage();
-      return -1;
-    }
-    String dirPath = args[0];
-    TableName tableName = TableName.valueOf(args[1]);
-
-    if (args.length == 2) {
-      return !run(dirPath, tableName).isEmpty() ? 0 : -1;
-    } else {
-      Map<byte[], List<Path>> family2Files = Maps.newHashMap();
-      FileSystem fs = FileSystem.get(getConf());
-      for (FileStatus regionDir : fs.listStatus(new Path(dirPath))) {
-        FSVisitor.visitRegionStoreFiles(fs, regionDir.getPath(), (region, family, hfileName) -> {
-          Path path = new Path(regionDir.getPath(), new Path(family, hfileName));
-          byte[] familyName = Bytes.toBytes(family);
-          if (family2Files.containsKey(familyName)) {
-            family2Files.get(familyName).add(path);
-          } else {
-            family2Files.put(familyName, Lists.newArrayList(path));
-          }
-        });
-      }
-      return !run(family2Files, tableName).isEmpty() ? 0 : -1;
-    }
-
-  }
-
-  public static void main(String[] args) throws Exception {
-    Configuration conf = HBaseConfiguration.create();
-    int ret = ToolRunner.run(conf, new LoadIncrementalHFiles(conf), args);
-    System.exit(ret);
-  }
-
-  /**
-   * Called from the replication sink, which manages the bulkToken (staging directory) by itself.
-   * This is used only when SecureBulkLoadEndpoint is configured in the
-   * hbase.coprocessor.region.classes property. The staging directory is a temporary directory to
-   * which all files are initially copied/moved from the user-given directory and given the
-   * required file permissions, and from there they are finally loaded into a table. Set this only
-   * when you would like to manage the staging directory yourself; otherwise this tool will handle
-   * it by itself.
-   * @param stagingDir staging directory path
-   */
-  public void setBulkToken(String stagingDir) {
-    this.bulkToken = stagingDir;
-  }
-
-  /**
-   * Infers region boundaries for a new table.
-   * <p>
-   * Parameter: <br>
-   * bdryMap is a map from keys to an integer belonging to {+1, -1}
-   * <ul>
-   * <li>If a key is a start key of a file, then it maps to +1</li>
-   * <li>If a key is an end key of a file, then it maps to -1</li>
-   * </ul>
-   * <p>
-   * Algo:<br>
-   * <ol>
-   * <li>Poll on the keys in order:
-   * <ol type="a">
-   * <li>Keep adding the mapped values to these keys (runningSum)</li>
-   * <li>Each time runningSum reaches 0, add the key at which the runningSum started to the
-   * boundary list.</li>
-   * </ol>
-   * </li>
-   * <li>Return the boundary list.</li>
-   * </ol>
-   */
-  public static byte[][] inferBoundaries(SortedMap<byte[], Integer> bdryMap) {
-    List<byte[]> keysArray = new ArrayList<>();
-    int runningValue = 0;
-    byte[] currStartKey = null;
-    boolean firstBoundary = true;
-
-    for (Map.Entry<byte[], Integer> item : bdryMap.entrySet()) {
-      if (runningValue == 0) {
-        currStartKey = item.getKey();
-      }
-      runningValue += item.getValue();
-      if (runningValue == 0) {
-        if (!firstBoundary) {
-          keysArray.add(currStartKey);
-        }
-        firstBoundary = false;
-      }
-    }
-
-    return keysArray.toArray(new byte[0][]);
-  }
-}
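
For illustration only (not part of this patch), here is a minimal standalone sketch of the boundary-inference sweep that the inferBoundaries Javadoc above describes; the row keys are hypothetical and only hbase-common's Bytes is assumed to be on the classpath.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

public class InferBoundariesSketch {
  public static void main(String[] args) {
    // Start keys map to +1, end keys map to -1 (hypothetical hfile boundaries).
    SortedMap<byte[], Integer> bdry = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    bdry.put(Bytes.toBytes("a"), 1);   // first row key of hfile 1
    bdry.put(Bytes.toBytes("c"), -1);  // last row key of hfile 1
    bdry.put(Bytes.toBytes("d"), 1);   // first row key of hfile 2
    bdry.put(Bytes.toBytes("f"), -1);  // last row key of hfile 2

    List<byte[]> boundaries = new ArrayList<>();
    int runningSum = 0;
    byte[] runStart = null;
    boolean firstRun = true;
    for (Map.Entry<byte[], Integer> e : bdry.entrySet()) {
      if (runningSum == 0) {
        runStart = e.getKey();      // a new run of overlapping hfiles begins here
      }
      runningSum += e.getValue();
      if (runningSum == 0) {        // the run closed; its start key is a region boundary
        if (!firstRun) {
          boundaries.add(runStart);
        }
        firstRun = false;           // the very first run never contributes a boundary
      }
    }
    // Prints "d": the kind of split key that createTable(...) would pass to Admin#createTable.
    boundaries.forEach(b -> System.out.println(Bytes.toString(b)));
  }
}
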
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index ac2d8e1..30314b7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -126,6 +126,7 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
@@ -3562,14 +3563,12 @@ public class HBaseFsck extends Configured implements Closeable {
   }
 
   public void dumpSidelinedRegions(Map<Path, HbckInfo> regions) {
-    for (Map.Entry<Path, HbckInfo> entry: regions.entrySet()) {
+    for (Map.Entry<Path, HbckInfo> entry : regions.entrySet()) {
       TableName tableName = entry.getValue().getTableName();
       Path path = entry.getKey();
-      errors.print("This sidelined region dir should be bulk loaded: "
-        + path.toString());
-      errors.print("Bulk load command looks like: "
-        + "hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles "
-        + path.toUri().getPath() + " "+ tableName);
+      errors.print("This sidelined region dir should be bulk loaded: " + path.toString());
+      errors.print("Bulk load command looks like: " + BulkLoadHFilesTool.NAME + " " +
+        path.toUri().getPath() + " " + tableName);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
new file mode 100644
index 0000000..d5fc58e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.security.token.Token;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+
+/**
+ * Can be overridden in a UT if you only want to implement part of the methods in
+ * {@link AsyncClusterConnection}.
+ */
+public class DummyAsyncClusterConnection implements AsyncClusterConnection {
+
+  @Override
+  public Configuration getConfiguration() {
+    return null;
+  }
+
+  @Override
+  public AsyncTableRegionLocator getRegionLocator(TableName tableName) {
+    return null;
+  }
+
+  @Override
+  public void clearRegionLocationCache() {
+  }
+
+  @Override
+  public AsyncTableBuilder<AdvancedScanResultConsumer> getTableBuilder(TableName tableName) {
+    return null;
+  }
+
+  @Override
+  public AsyncTableBuilder<ScanResultConsumer> getTableBuilder(TableName tableName,
+      ExecutorService pool) {
+    return null;
+  }
+
+  @Override
+  public AsyncAdminBuilder getAdminBuilder() {
+    return null;
+  }
+
+  @Override
+  public AsyncAdminBuilder getAdminBuilder(ExecutorService pool) {
+    return null;
+  }
+
+  @Override
+  public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName) {
+    return null;
+  }
+
+  @Override
+  public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName,
+      ExecutorService pool) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Hbck> getHbck() {
+    return null;
+  }
+
+  @Override
+  public Hbck getHbck(ServerName masterServer) throws IOException {
+    return null;
+  }
+
+  @Override
+  public boolean isClosed() {
+    return false;
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+
+  @Override
+  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
+    return null;
+  }
+
+  @Override
+  public NonceGenerator getNonceGenerator() {
+    return null;
+  }
+
+  @Override
+  public RpcClient getRpcClient() {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
+      boolean writeFlushWALMarker) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Long> replay(TableName tableName, byte[] encodedRegionName, byte[] row,
+      List<Entry> entries, int replicaId, int numRetries, long operationTimeoutNs) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
+      boolean reload) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<String> prepareBulkLoad(TableName tableName) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Boolean> bulkLoad(TableName tableName,
+      List<Pair<byte[], String>> familyPaths, byte[] row, boolean assignSeqNum, Token<?> userToken,
+      String bulkToken, boolean copyFiles) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Void> cleanupBulkLoad(TableName tableName, String bulkToken) {
+    return null;
+  }
+}
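
A possible usage sketch (not part of the patch): a test can subclass DummyAsyncClusterConnection anonymously and override only the method it exercises; the token value below is made up.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.DummyAsyncClusterConnection;

public class DummyAsyncClusterConnectionUsage {
  public static void main(String[] args) throws Exception {
    // Everything except prepareBulkLoad keeps the dummy's null/no-op behaviour.
    AsyncClusterConnection conn = new DummyAsyncClusterConnection() {
      @Override
      public CompletableFuture<String> prepareBulkLoad(TableName tableName) {
        // Hypothetical stub: return a fixed staging token instead of asking a region server.
        return CompletableFuture.completedFuture("test-bulk-token");
      }
    };
    System.out.println(conn.prepareBulkLoad(TableName.valueOf("t1")).get());
  }
}
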
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncRegistry.java
new file mode 100644
index 0000000..e9ae25d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncRegistry.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+
+/**
+ * Can be overridden in a UT if you only want to implement part of the methods in
+ * {@link AsyncRegistry}.
+ */
+public class DummyAsyncRegistry implements AsyncRegistry {
+
+  public static final String REGISTRY_IMPL_CONF_KEY = AsyncRegistryFactory.REGISTRY_IMPL_CONF_KEY;
+
+  @Override
+  public CompletableFuture<RegionLocations> getMetaRegionLocation() {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<String> getClusterId() {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Integer> getCurrentNrHRS() {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<ServerName> getMasterAddress() {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Integer> getMasterInfoPort() {
+    return null;
+  }
+
+  @Override
+  public void close() {
+  }
+}
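
Likewise a sketch, assuming a test wants to plug in its own registry: it can point REGISTRY_IMPL_CONF_KEY at a subclass of DummyAsyncRegistry. The subclass name is hypothetical, and whether the factory can instantiate it depends on its reflection requirements.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.DummyAsyncRegistry;

public class DummyAsyncRegistryUsage {

  // Hypothetical test registry; a real test would override getClusterId(),
  // getMetaRegionLocation(), etc. with canned answers.
  public static final class MyTestRegistry extends DummyAsyncRegistry {
  }

  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Route async connections created from this conf to the test registry.
    conf.set(DummyAsyncRegistry.REGISTRY_IMPL_CONF_KEY, MyTestRegistry.class.getName());
  }
}
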
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java
new file mode 100644
index 0000000..2e9bb74
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import com.google.protobuf.RpcChannel;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+
+/**
+ * Can be overridden in UT if you only want to implement part of the methods in {@link AsyncTable}.
+ */
+public class DummyAsyncTable<C extends ScanResultConsumerBase> implements AsyncTable<C> {
+
+  @Override
+  public TableName getName() {
+    return null;
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<TableDescriptor> getDescriptor() {
+    return null;
+  }
+
+  @Override
+  public AsyncTableRegionLocator getRegionLocator() {
+    return null;
+  }
+
+  @Override
+  public long getRpcTimeout(TimeUnit unit) {
+    return 0;
+  }
+
+  @Override
+  public long getReadRpcTimeout(TimeUnit unit) {
+    return 0;
+  }
+
+  @Override
+  public long getWriteRpcTimeout(TimeUnit unit) {
+    return 0;
+  }
+
+  @Override
+  public long getOperationTimeout(TimeUnit unit) {
+    return 0;
+  }
+
+  @Override
+  public long getScanTimeout(TimeUnit unit) {
+    return 0;
+  }
+
+  @Override
+  public CompletableFuture<Result> get(Get get) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Void> put(Put put) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Void> delete(Delete delete) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Result> append(Append append) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Result> increment(Increment increment) {
+    return null;
+  }
+
+  @Override
+  public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<Void> mutateRow(RowMutations mutation) {
+    return null;
+  }
+
+  @Override
+  public void scan(Scan scan, C consumer) {
+  }
+
+  @Override
+  public ResultScanner getScanner(Scan scan) {
+    return null;
+  }
+
+  @Override
+  public CompletableFuture<List<Result>> scanAll(Scan scan) {
+    return null;
+  }
+
+  @Override
+  public List<CompletableFuture<Result>> get(List<Get> gets) {
+    return null;
+  }
+
+  @Override
+  public List<CompletableFuture<Void>> put(List<Put> puts) {
+    return null;
+  }
+
+  @Override
+  public List<CompletableFuture<Void>> delete(List<Delete> deletes) {
+    return null;
+  }
+
+  @Override
+  public <T> List<CompletableFuture<T>> batch(List<? extends Row> actions) {
+    return null;
+  }
+
+  @Override
+  public <S, R> CompletableFuture<R> coprocessorService(Function<RpcChannel, S> stubMaker,
+      ServiceCaller<S, R> callable, byte[] row) {
+    return null;
+  }
+
+  @Override
+  public <S, R> CoprocessorServiceBuilder<S, R> coprocessorService(
+      Function<RpcChannel, S> stubMaker, ServiceCaller<S, R> callable,
+      CoprocessorCallback<R> callback) {
+    return null;
+  }
+}
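
Likewise, a minimal sketch (illustrative only; the class name and canned value are hypothetical) of overriding a single AsyncTable method while inheriting the dummy defaults for everything else:

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
    import org.apache.hadoop.hbase.client.DummyAsyncTable;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;

    public class EmptyGetAsyncTable extends DummyAsyncTable<AdvancedScanResultConsumer> {

      @Override
      public CompletableFuture<Result> get(Get get) {
        // Always answer "row not found"; all other methods keep the dummy's defaults.
        return CompletableFuture.completedFuture(Result.EMPTY_RESULT);
      }
    }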
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index 987ac7e..d53353e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
+import java.util.TreeMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -45,15 +47,14 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -472,40 +473,17 @@ public class TestReplicaWithCluster {
     final int numRows = 10;
     final byte[] qual = Bytes.toBytes("qual");
     final byte[] val  = Bytes.toBytes("val");
-    final List<Pair<byte[], String>> famPaths = new ArrayList<>();
+    Map<byte[], List<Path>> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for (HColumnDescriptor col : hdt.getColumnFamilies()) {
       Path hfile = new Path(dir, col.getNameAsString());
-      TestHRegionServerBulkLoad.createHFile(HTU.getTestFileSystem(), hfile, col.getName(),
-        qual, val, numRows);
-      famPaths.add(new Pair<>(col.getName(), hfile.toString()));
+      TestHRegionServerBulkLoad.createHFile(HTU.getTestFileSystem(), hfile, col.getName(), qual,
+        val, numRows);
+      family2Files.put(col.getName(), Collections.singletonList(hfile));
     }
 
     // bulk load HFiles
     LOG.debug("Loading test data");
-    final ClusterConnection conn = (ClusterConnection) HTU.getAdmin().getConnection();
-    table = conn.getTable(hdt.getTableName());
-    final String bulkToken =
-        new SecureBulkLoadClient(HTU.getConfiguration(), table).prepareBulkLoad(conn);
-    ClientServiceCallable<Void> callable = new ClientServiceCallable<Void>(conn,
-        hdt.getTableName(), TestHRegionServerBulkLoad.rowkey(0),
-        new RpcControllerFactory(HTU.getConfiguration()).newController(), HConstants.PRIORITY_UNSET) {
-      @Override
-      protected Void rpcCall() throws Exception {
-        LOG.debug("Going to connect to server " + getLocation() + " for row "
-            + Bytes.toStringBinary(getRow()));
-        SecureBulkLoadClient secureClient = null;
-        byte[] regionName = getLocation().getRegionInfo().getRegionName();
-        try (Table table = conn.getTable(getTableName())) {
-          secureClient = new SecureBulkLoadClient(HTU.getConfiguration(), table);
-          secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName,
-              true, null, bulkToken);
-        }
-        return null;
-      }
-    };
-    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(HTU.getConfiguration());
-    RpcRetryingCaller<Void> caller = factory.newCaller();
-    caller.callWithRetries(callable, 10000);
+    BulkLoadHFiles.create(HTU.getConfiguration()).bulkLoad(hdt.getTableName(), family2Files);
 
     // verify we can read them from the primary
     LOG.debug("Verifying data load");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index 6934c98..a90f4e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -72,7 +72,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTrack
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
@@ -567,7 +567,7 @@ public class TestRegionObserverInterface {
       createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);
 
       // Bulk load
-      new LoadIncrementalHFiles(conf).doBulkLoad(dir, util.getAdmin(), table, locator);
+      BulkLoadHFiles.create(conf).bulkLoad(tableName, dir);
 
       verifyMethodResult(SimpleRegionObserver.class,
         new String[] { "hadPreBulkLoadHFile", "hadPostBulkLoadHFile" }, tableName,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
index 40cd540..84463b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
@@ -22,41 +22,38 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.Random;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClientServiceCallable;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.SecureBulkLoadClient;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
 import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
 import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
@@ -378,39 +375,21 @@ public class SpaceQuotaHelperForTests {
   /**
    * Bulk-loads a number of files with a number of rows to the given table.
    */
-  ClientServiceCallable<Boolean> generateFileToLoad(
-      TableName tn, int numFiles, int numRowsPerFile) throws Exception {
-    Connection conn = testUtil.getConnection();
+  Map<byte[], List<Path>> generateFileToLoad(TableName tn, int numFiles, int numRowsPerFile)
+      throws Exception {
     FileSystem fs = testUtil.getTestFileSystem();
-    Configuration conf = testUtil.getConfiguration();
     Path baseDir = new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files");
     fs.mkdirs(baseDir);
-    final List<Pair<byte[], String>> famPaths = new ArrayList<>();
+    List<Path> hfiles = new ArrayList<>();
     for (int i = 1; i <= numFiles; i++) {
       Path hfile = new Path(baseDir, "file" + i);
-      TestHRegionServerBulkLoad.createHFile(
-          fs, hfile, Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("my"),
-          Bytes.toBytes("file"), numRowsPerFile);
-      famPaths.add(new Pair<>(Bytes.toBytes(SpaceQuotaHelperForTests.F1), hfile.toString()));
+      TestHRegionServerBulkLoad.createHFile(fs, hfile, Bytes.toBytes(SpaceQuotaHelperForTests.F1),
+        Bytes.toBytes("my"), Bytes.toBytes("file"), numRowsPerFile);
+      hfiles.add(hfile);
     }
-
-    // bulk load HFiles
-    Table table = conn.getTable(tn);
-    final String bulkToken = new SecureBulkLoadClient(conf, table).prepareBulkLoad(conn);
-    return new ClientServiceCallable<Boolean>(
-        conn, tn, Bytes.toBytes("row"), new RpcControllerFactory(conf).newController(),
-        HConstants.PRIORITY_UNSET) {
-      @Override
-     public Boolean rpcCall() throws Exception {
-        SecureBulkLoadClient secureClient = null;
-        byte[] regionName = getLocation().getRegion().getRegionName();
-        try (Table table = conn.getTable(getTableName())) {
-          secureClient = new SecureBulkLoadClient(conf, table);
-          return secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName,
-                true, null, bulkToken);
-        }
-      }
-    };
+    Map<byte[], List<Path>> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    family2Files.put(Bytes.toBytes(SpaceQuotaHelperForTests.F1), hfiles);
+    return family2Files;
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java
index fdc7ad3..9e3dd58 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java
@@ -17,12 +17,13 @@
 package org.apache.hadoop.hbase.quotas;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -31,11 +32,8 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClientServiceCallable;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.RpcRetryingCaller;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate;
@@ -43,6 +41,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -214,7 +213,7 @@ public class TestLowLatencySpaceQuotas {
         tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
     admin.setQuota(settings);
 
-    ClientServiceCallable<Boolean> callable = helper.generateFileToLoad(tn, 3, 550);
+    Map<byte[], List<Path>> family2Files = helper.generateFileToLoad(tn, 3, 550);
     // Make sure the files are about as long as we expect
     FileSystem fs = TEST_UTIL.getTestFileSystem();
     FileStatus[] files = fs.listStatus(
@@ -228,13 +227,13 @@ public class TestLowLatencySpaceQuotas {
       totalSize += file.getLen();
     }
 
-    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
-    RpcRetryingCaller<Boolean> caller = factory.<Boolean> newCaller();
-    assertTrue("The bulk load failed", caller.callWithRetries(callable, Integer.MAX_VALUE));
+    assertFalse("The bulk load failed",
+      BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tn, family2Files).isEmpty());
 
     final long finalTotalSize = totalSize;
     TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
-      @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
+      @Override
+      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
         return snapshot.getUsage() >= finalTotalSize;
       }
     });
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java
index 05ee68a..fca5453 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java
@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.hbase.quotas;
 
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.atomic.AtomicLong;
@@ -38,7 +42,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.ClientServiceCallable;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Increment;
@@ -47,8 +50,6 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.RpcRetryingCaller;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.quotas.policies.DefaultViolationPolicyEnforcement
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
@@ -237,19 +239,18 @@ public class TestSpaceQuotas {
   @Test
   public void testNoBulkLoadsWithNoWrites() throws Exception {
     Put p = new Put(Bytes.toBytes("to_reject"));
-    p.addColumn(
-        Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), Bytes.toBytes("reject"));
+    p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
+      Bytes.toBytes("reject"));
     TableName tableName = writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p);
 
     // The table is now in violation. Try to do a bulk load
-    ClientServiceCallable<Boolean> callable = helper.generateFileToLoad(tableName, 1, 50);
-    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
-    RpcRetryingCaller<Boolean> caller = factory.newCaller();
+    Map<byte[], List<Path>> family2Files = helper.generateFileToLoad(tableName, 1, 50);
     try {
-      caller.callWithRetries(callable, Integer.MAX_VALUE);
+      BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tableName, family2Files);
       fail("Expected the bulk load call to fail!");
-    } catch (SpaceLimitingException e) {
+    } catch (IOException e) {
       // Pass
+      assertThat(e.getCause(), instanceOf(SpaceLimitingException.class));
       LOG.trace("Caught expected exception", e);
     }
   }
@@ -293,7 +294,7 @@ public class TestSpaceQuotas {
         enforcement instanceof DefaultViolationPolicyEnforcement);
 
     // Should generate two files, each of which is over 25KB each
-    ClientServiceCallable<Boolean> callable = helper.generateFileToLoad(tn, 2, 525);
+    Map<byte[], List<Path>> family2Files = helper.generateFileToLoad(tn, 2, 525);
     FileSystem fs = TEST_UTIL.getTestFileSystem();
     FileStatus[] files = fs.listStatus(
         new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files"));
@@ -305,13 +306,12 @@ public class TestSpaceQuotas {
       LOG.debug(file.getPath() + " -> " + file.getLen() +"B");
     }
 
-    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration());
-    RpcRetryingCaller<Boolean> caller = factory.newCaller();
     try {
-      caller.callWithRetries(callable, Integer.MAX_VALUE);
+      BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tn, family2Files);
       fail("Expected the bulk load call to fail!");
-    } catch (SpaceLimitingException e) {
+    } catch (IOException e) {
       // Pass
+      assertThat(e.getCause(), instanceOf(SpaceLimitingException.class));
       LOG.trace("Caught expected exception", e);
     }
     // Verify that we have no data in the table because neither file should have been
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index c86f3e1..fd02cf4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -25,9 +25,11 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -53,7 +55,6 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RpcRetryingCaller;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.SecureBulkLoadClient;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
@@ -71,8 +72,8 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
@@ -204,59 +205,37 @@ public class TestHRegionServerBulkLoad {
       // create HFiles for different column families
       FileSystem fs = UTIL.getTestFileSystem();
       byte[] val = Bytes.toBytes(String.format("%010d", iteration));
-      final List<Pair<byte[], String>> famPaths = new ArrayList<>(NUM_CFS);
+      Map<byte[], List<Path>> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       for (int i = 0; i < NUM_CFS; i++) {
         Path hfile = new Path(dir, family(i));
         byte[] fam = Bytes.toBytes(family(i));
         createHFile(fs, hfile, fam, QUAL, val, 1000);
-        famPaths.add(new Pair<>(fam, hfile.toString()));
+        family2Files.put(fam, Collections.singletonList(hfile));
       }
-
       // bulk load HFiles
-      final ClusterConnection conn = (ClusterConnection)UTIL.getConnection();
-      Table table = conn.getTable(tableName);
-      final String bulkToken = new SecureBulkLoadClient(UTIL.getConfiguration(), table).
-          prepareBulkLoad(conn);
-      ClientServiceCallable<Void> callable = new ClientServiceCallable<Void>(conn,
-          tableName, Bytes.toBytes("aaa"),
-          new RpcControllerFactory(UTIL.getConfiguration()).newController(), HConstants.PRIORITY_UNSET) {
-        @Override
-        public Void rpcCall() throws Exception {
-          LOG.debug("Going to connect to server " + getLocation() + " for row "
-              + Bytes.toStringBinary(getRow()));
-          SecureBulkLoadClient secureClient = null;
-          byte[] regionName = getLocation().getRegionInfo().getRegionName();
-          try (Table table = conn.getTable(getTableName())) {
-            secureClient = new SecureBulkLoadClient(UTIL.getConfiguration(), table);
-            secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName,
-                  true, null, bulkToken);
-          }
-          return null;
-        }
-      };
-      RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf);
-      RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
-      caller.callWithRetries(callable, Integer.MAX_VALUE);
-
+      BulkLoadHFiles.create(UTIL.getConfiguration()).bulkLoad(tableName, family2Files);
       // Periodically do compaction to reduce the number of open file handles.
       if (numBulkLoads.get() % 5 == 0) {
+        RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf);
+        RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
         // 5 * 50 = 250 open file handles!
-        callable = new ClientServiceCallable<Void>(conn,
-            tableName, Bytes.toBytes("aaa"),
-            new RpcControllerFactory(UTIL.getConfiguration()).newController(), HConstants.PRIORITY_UNSET) {
-          @Override
-          protected Void rpcCall() throws Exception {
-            LOG.debug("compacting " + getLocation() + " for row "
-                + Bytes.toStringBinary(getRow()));
-            AdminProtos.AdminService.BlockingInterface server =
-              conn.getAdmin(getLocation().getServerName());
-            CompactRegionRequest request = RequestConverter.buildCompactRegionRequest(
+        ClientServiceCallable<Void> callable =
+          new ClientServiceCallable<Void>(UTIL.getConnection(), tableName, Bytes.toBytes("aaa"),
+            new RpcControllerFactory(UTIL.getConfiguration()).newController(),
+            HConstants.PRIORITY_UNSET) {
+            @Override
+            protected Void rpcCall() throws Exception {
+              LOG.debug(
+                "compacting " + getLocation() + " for row " + Bytes.toStringBinary(getRow()));
+              AdminProtos.AdminService.BlockingInterface server =
+                ((ClusterConnection) UTIL.getConnection()).getAdmin(getLocation().getServerName());
+              CompactRegionRequest request = RequestConverter.buildCompactRegionRequest(
                 getLocation().getRegionInfo().getRegionName(), true, null);
-            server.compactRegion(null, request);
-            numCompactions.incrementAndGet();
-            return null;
-          }
-        };
+              server.compactRegion(null, request);
+              numCompactions.incrementAndGet();
+              return null;
+            }
+          };
         caller.callWithRetries(callable, Integer.MAX_VALUE);
       }
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
index f30f084..1fe1f3f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -45,7 +44,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -94,10 +93,7 @@ public class TestScannerWithBulkload {
       false);
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
-    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-      bulkload.doBulkLoad(hfilePath, admin, table, locator);
-    }
+    BulkLoadHFiles.create(conf).bulkLoad(tableName, hfilePath);
     ResultScanner scanner = table.getScanner(scan);
     Result result = scanner.next();
     result = scanAfterBulkLoad(scanner, result, "version2");
@@ -233,7 +229,7 @@ public class TestScannerWithBulkload {
         "/temp/testBulkLoadWithParallelScan/col/file", false);
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
-    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
+    final BulkLoadHFiles bulkload = BulkLoadHFiles.create(conf);
     ResultScanner scanner = table.getScanner(scan);
     Result result = scanner.next();
     // Create a scanner and then do bulk load
@@ -246,9 +242,7 @@ public class TestScannerWithBulkload {
           put1.add(new KeyValue(Bytes.toBytes("row5"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
               Bytes.toBytes("version0")));
           table.put(put1);
-          try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-            bulkload.doBulkLoad(hfilePath, admin, table, locator);
-          }
+          bulkload.bulkLoad(tableName, hfilePath);
           latch.countDown();
         } catch (TableNotFoundException e) {
         } catch (IOException e) {
@@ -276,10 +270,7 @@ public class TestScannerWithBulkload {
       "/temp/testBulkLoadNativeHFile/col/file", true);
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
-    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-      bulkload.doBulkLoad(hfilePath, admin, table, locator);
-    }
+    BulkLoadHFiles.create(conf).bulkLoad(tableName, hfilePath);
     ResultScanner scanner = table.getScanner(scan);
     Result result = scanner.next();
     // We had 'version0', 'version1' for 'row1,col:q' in the table.
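
Similarly, a minimal sketch (illustrative only; helper class hypothetical) of the Path-based overload used in this file, which takes a staging directory instead of requiring an Admin, Table and RegionLocator:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class DirBasedBulkLoadSketch {

      static void load(Configuration conf, TableName tableName, Path stagingDir) throws IOException {
        // The table name replaces the old Admin/Table/RegionLocator arguments of doBulkLoad.
        BulkLoadHFiles.create(conf).bulkLoad(tableName, stagingDir);
      }
    }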
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
index eb25806..5c73d07 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
@@ -21,10 +21,8 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Deque;
 import java.util.Map;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Consumer;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -32,8 +30,8 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -47,7 +45,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
@@ -59,6 +57,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
 
 
@@ -178,7 +177,7 @@ public class TestSecureBulkLoadManager {
 
   /**
    * A trick is used to make sure server-side failures (if any) are not covered up by a client
-   * retry. Since LoadIncrementalHFiles.doBulkLoad keeps performing bulkload calls as long as the
+   * retry. Since BulkLoadHFilesTool.bulkLoad keeps performing bulkload calls as long as the
    * HFile queue is not empty, while server-side exceptions in the doAs block do not lead
    * to a client exception, a bulkload will always succeed in this case by default, thus the client
    * will never be aware that failures have ever happened. To avoid this kind of retry,
@@ -187,23 +186,23 @@ public class TestSecureBulkLoadManager {
    * once, and server-side failures, if any, can be checked via data.
    */
   class MyExceptionToAvoidRetry extends DoNotRetryIOException {
+
+    private static final long serialVersionUID = -6802760664998771151L;
   }
 
   private void doBulkloadWithoutRetry(Path dir) throws Exception {
-    Connection connection = testUtil.getConnection();
-    LoadIncrementalHFiles h = new LoadIncrementalHFiles(conf) {
+    BulkLoadHFilesTool h = new BulkLoadHFilesTool(conf) {
+
       @Override
-      protected void bulkLoadPhase(final Table htable, final Connection conn,
-          ExecutorService pool, Deque<LoadQueueItem> queue,
-          final Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFile,
-          Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
-        super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
+      protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
+          Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
+          boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
+        super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
         throw new MyExceptionToAvoidRetry(); // throw exception to avoid retry
       }
     };
     try {
-      h.doBulkLoad(dir, testUtil.getAdmin(), connection.getTable(TABLE),
-          connection.getRegionLocator(TABLE));
+      h.bulkLoad(TABLE, dir);
       Assert.fail("MyExceptionToAvoidRetry is expected");
     } catch (MyExceptionToAvoidRetry e) { //expected
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
index 37ca7dc..f546058 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
@@ -67,7 +67,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.replication.regionserver.TestSourceFSConfigurationProvider;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -622,9 +622,7 @@ public class TestMasterReplication {
 
     Table source = tables[masterNumber];
     final TableName tableName = source.getName();
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
-    String[] args = { dir.toString(), tableName.toString() };
-    loader.run(args);
+    BulkLoadHFiles.create(util.getConfiguration()).bulkLoad(tableName, dir);
 
     if (toValidate) {
       for (int slaveClusterNumber : slaveNumbers) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
index 2d6c28f..eb3a7a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -275,7 +275,7 @@ public class TestReplicationSink {
   }
 
   @Test
-  public void testRethrowRetriesExhaustedWithDetailsException() throws Exception {
+  public void testRethrowRetriesExhaustedException() throws Exception {
     TableName notExistTable = TableName.valueOf("notExistTable");
     List<WALEntry> entries = new ArrayList<>();
     List<Cell> cells = new ArrayList<>();
@@ -300,7 +300,7 @@ public class TestReplicationSink {
           SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
             replicationClusterId, baseNamespaceDir, hfileArchiveDir);
           Assert.fail("Should re-throw RetriesExhaustedWithDetailsException.");
-        } catch (RetriesExhaustedWithDetailsException e) {
+        } catch (RetriesExhaustedException e) {
         } finally {
           admin.enableTable(TABLE_NAME1);
         }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
index ff46a98..41d4f46 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
@@ -21,47 +21,31 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.net.SocketAddress;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.BufferedMutator;
-import org.apache.hadoop.hbase.client.BufferedMutatorParams;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.AsyncTable;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
+import org.apache.hadoop.hbase.client.DummyAsyncClusterConnection;
+import org.apache.hadoop.hbase.client.DummyAsyncRegistry;
+import org.apache.hadoop.hbase.client.DummyAsyncTable;
 import org.apache.hadoop.hbase.client.Row;
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -81,15 +65,16 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 /**
  * Simple test of sink-side wal entry filter facility.
  */
-@Category({ReplicationTests.class, SmallTests.class})
+@Category({ ReplicationTests.class, SmallTests.class })
 public class TestWALEntrySinkFilter {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestWALEntrySinkFilter.class);
+    HBaseClassTestRule.forClass(TestWALEntrySinkFilter.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSink.class);
-  @Rule public TestName name = new TestName();
+  @Rule
+  public TestName name = new TestName();
   static final int BOUNDARY = 5;
   static final AtomicInteger UNFILTERED = new AtomicInteger();
   static final AtomicInteger FILTERED = new AtomicInteger();
@@ -113,55 +98,48 @@ public class TestWALEntrySinkFilter {
   };
 
   /**
-   * Test filter.
-   * Filter will filter out any write time that is <= 5 (BOUNDARY). We count how many items we
-   * filter out and we count how many cells make it through for distribution way down below in the
-   * Table#batch implementation. Puts in place a custom DevNullConnection so we can insert our
-   * counting Table.
+   * Test filter. Filter will filter out any write time that is <= 5 (BOUNDARY). We count how many
+   * items we filter out and we count how many cells make it through for distribution way down below
+   * in the batchAll implementation. Puts in place a custom DevNullAsyncClusterConnection so we can
+   * insert our counting AsyncTable.
    * @throws IOException
    */
   @Test
   public void testWALEntryFilter() throws IOException {
     Configuration conf = HBaseConfiguration.create();
     // Make it so our filter is instantiated on construction of ReplicationSink.
+    conf.setClass(DummyAsyncRegistry.REGISTRY_IMPL_CONF_KEY, DevNullAsyncRegistry.class,
+      DummyAsyncRegistry.class);
     conf.setClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY,
-        IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl.class, WALEntrySinkFilter.class);
-    conf.setClass("hbase.client.connection.impl", DevNullConnection.class,
-      Connection.class);
+      IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl.class, WALEntrySinkFilter.class);
+    conf.setClass(ClusterConnectionFactory.HBASE_SERVER_CLUSTER_CONNECTION_IMPL,
+      DevNullAsyncClusterConnection.class, AsyncClusterConnection.class);
     ReplicationSink sink = new ReplicationSink(conf, STOPPABLE);
     // Create some dumb walentries.
-    List< org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry > entries =
-        new ArrayList<>();
+    List<AdminProtos.WALEntry> entries = new ArrayList<>();
     AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder();
     // Need a tablename.
     ByteString tableName =
-        ByteString.copyFromUtf8(TableName.valueOf(this.name.getMethodName()).toString());
+      ByteString.copyFromUtf8(TableName.valueOf(this.name.getMethodName()).toString());
     // Add WALEdit Cells to Cells List. The way edits arrive at the sink is with protos
     // describing the edit with all Cells from all edits aggregated in a single CellScanner.
     final List<Cell> cells = new ArrayList<>();
     int count = BOUNDARY * 2;
-    for(int i = 0; i < count; i++) {
-      byte [] bytes = Bytes.toBytes(i);
+    for (int i = 0; i < count; i++) {
+      byte[] bytes = Bytes.toBytes(i);
       // Create a wal entry. Everything is set to the current index as bytes or int/long.
       entryBuilder.clear();
-      entryBuilder.setKey(entryBuilder.getKeyBuilder().
-          setLogSequenceNumber(i).
-          setEncodedRegionName(ByteString.copyFrom(bytes)).
-          setWriteTime(i).
-          setTableName(tableName).build());
+      entryBuilder.setKey(entryBuilder.getKeyBuilder().setLogSequenceNumber(i)
+        .setEncodedRegionName(ByteString.copyFrom(bytes)).setWriteTime(i).setTableName(tableName)
+        .build());
       // Lets have one Cell associated with each WALEdit.
       entryBuilder.setAssociatedCellCount(1);
       entries.add(entryBuilder.build());
       // We need to add a Cell per WALEdit to the cells array.
       CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
       // Make cells whose row, family, cell, value, and ts are == 'i'.
-      Cell cell = cellBuilder.
-          setRow(bytes).
-          setFamily(bytes).
-          setQualifier(bytes).
-          setType(Cell.Type.Put).
-          setTimestamp(i).
-          setValue(bytes).build();
+      Cell cell = cellBuilder.setRow(bytes).setFamily(bytes).setQualifier(bytes)
+        .setType(Cell.Type.Put).setTimestamp(i).setValue(bytes).build();
       cells.add(cell);
     }
     // Now wrap our cells array in a CellScanner that we can pass in to replicateEntries. It has
@@ -192,11 +170,13 @@ public class TestWALEntrySinkFilter {
   /**
   * Simple filter that will filter out any entry whose writeTime is <= 5.
    */
-  public static class IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl implements WALEntrySinkFilter {
-    public IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl() {}
+  public static class IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl
+      implements WALEntrySinkFilter {
+    public IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl() {
+    }
 
     @Override
-    public void init(Connection connection) {
+    public void init(AsyncConnection conn) {
       // Do nothing.
     }
 
@@ -210,335 +190,48 @@ public class TestWALEntrySinkFilter {
     }
   }
 
-  /**
-   * A DevNull Connection whose only purpose is checking what edits made it through. See down in
-   * {@link Table#batch(List, Object[])}.
-   */
-  public static class DevNullConnection implements Connection {
-    private final Configuration configuration;
+  public static class DevNullAsyncRegistry extends DummyAsyncRegistry {
 
-    DevNullConnection(Configuration configuration, ExecutorService es, User user) {
-      this.configuration = configuration;
+    public DevNullAsyncRegistry(Configuration conf) {
     }
 
     @Override
-    public void abort(String why, Throwable e) {
-
-    }
-
-    @Override
-    public boolean isAborted() {
-      return false;
-    }
-
-    @Override
-    public Configuration getConfiguration() {
-      return this.configuration;
-    }
-
-    @Override
-    public BufferedMutator getBufferedMutator(TableName tableName) throws IOException {
-      return null;
-    }
-
-    @Override
-    public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException {
-      return null;
-    }
-
-    @Override
-    public RegionLocator getRegionLocator(TableName tableName) throws IOException {
-      return null;
-    }
-
-    @Override
-    public Admin getAdmin() throws IOException {
-      return null;
+    public CompletableFuture<String> getClusterId() {
+      return CompletableFuture.completedFuture("test");
     }
+  }
 
-    @Override
-    public void close() throws IOException {
+  public static class DevNullAsyncClusterConnection extends DummyAsyncClusterConnection {
 
-    }
+    private final Configuration conf;
 
-    @Override
-    public boolean isClosed() {
-      return false;
+    public DevNullAsyncClusterConnection(Configuration conf, Object registry, String clusterId,
+        SocketAddress localAddress, User user) {
+      this.conf = conf;
     }
 
     @Override
-    public TableBuilder getTableBuilder(final TableName tableName, ExecutorService pool) {
-      return new TableBuilder() {
-        @Override
-        public TableBuilder setOperationTimeout(int timeout) {
-          return this;
-        }
+    public AsyncTable<AdvancedScanResultConsumer> getTable(TableName tableName) {
+      return new DummyAsyncTable<AdvancedScanResultConsumer>() {
 
         @Override
-        public TableBuilder setRpcTimeout(int timeout) {
-          return this;
-        }
-
-        @Override
-        public TableBuilder setReadRpcTimeout(int timeout) {
-          return this;
-        }
-
-        @Override
-        public TableBuilder setWriteRpcTimeout(int timeout) {
-          return this;
-        }
-
-        @Override
-        public Table build() {
-          return new Table() {
-            @Override
-            public TableName getName() {
-              return tableName;
-            }
-
-            @Override
-            public Configuration getConfiguration() {
-              return configuration;
-            }
-
-            @Override
-            public HTableDescriptor getTableDescriptor() throws IOException {
-              return null;
-            }
-
-            @Override
-            public TableDescriptor getDescriptor() throws IOException {
-              return null;
-            }
-
-            @Override
-            public boolean exists(Get get) throws IOException {
-              return false;
-            }
-
-            @Override
-            public boolean[] exists(List<Get> gets) throws IOException {
-              return new boolean[0];
-            }
-
-            @Override
-            public void batch(List<? extends Row> actions, Object[] results) throws IOException, InterruptedException {
-              for (Row action: actions) {
-                // Row is the index of the loop above where we make WALEntry and Cells.
-                int row = Bytes.toInt(action.getRow());
-                assertTrue("" + row, row> BOUNDARY);
-                UNFILTERED.incrementAndGet();
-              }
-            }
-
-            @Override
-            public <R> void batchCallback(List<? extends Row> actions, Object[] results, Batch.Callback<R> callback) throws IOException, InterruptedException {
-
-            }
-
-            @Override
-            public Result get(Get get) throws IOException {
-              return null;
-            }
-
-            @Override
-            public Result[] get(List<Get> gets) throws IOException {
-              return new Result[0];
-            }
-
-            @Override
-            public ResultScanner getScanner(Scan scan) throws IOException {
-              return null;
-            }
-
-            @Override
-            public ResultScanner getScanner(byte[] family) throws IOException {
-              return null;
-            }
-
-            @Override
-            public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
-              return null;
-            }
-
-            @Override
-            public void put(Put put) throws IOException {
-
-            }
-
-            @Override
-            public void put(List<Put> puts) throws IOException {
-
-            }
-
-            @Override
-            public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException {
-              return false;
-            }
-
-            @Override
-            public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put) throws IOException {
-              return false;
-            }
-
-            @Override
-            public void delete(Delete delete) throws IOException {
-
-            }
-
-            @Override
-            public void delete(List<Delete> deletes) throws IOException {
-
-            }
-
-            @Override
-            public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete) throws IOException {
-              return false;
-            }
-
-            @Override
-            public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Delete delete) throws IOException {
-              return false;
-            }
-
-            @Override
-            public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
-              return null;
-            }
-
-            @Override
-            public void mutateRow(RowMutations rm) throws IOException {
-
-            }
-
-            @Override
-            public Result append(Append append) throws IOException {
-              return null;
-            }
-
-            @Override
-            public Result increment(Increment increment) throws IOException {
-              return null;
-            }
-
-            @Override
-            public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException {
-              return 0;
-            }
-
-            @Override
-            public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability) throws IOException {
-              return 0;
-            }
-
-            @Override
-            public void close() throws IOException {
-
-            }
-
-            @Override
-            public CoprocessorRpcChannel coprocessorService(byte[] row) {
-              return null;
-            }
-
-            @Override
-            public <T extends com.google.protobuf.Service, R> Map<byte[], R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, Batch.Call<T, R> callable) throws com.google.protobuf.ServiceException, Throwable {
-              return null;
-            }
-
-            @Override
-            public <T extends com.google.protobuf.Service, R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, Batch.Call<T, R> callable, Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable {
-
-            }
-
-            @Override
-            public <R extends com.google.protobuf.Message> Map<byte[], R> batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws com.google.protobuf.ServiceException, Throwable {
-              return null;
-            }
-
-            @Override
-            public <R extends com.google.protobuf.Message> void batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype, Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable {
-
-            }
-
-            @Override
-            public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, RowMutations mutation) throws IOException {
-              return false;
-            }
-
-            @Override
-            public long getRpcTimeout(TimeUnit unit) {
-              return 0;
-            }
-
-            @Override
-            public int getRpcTimeout() {
-              return 0;
-            }
-
-            @Override
-            public void setRpcTimeout(int rpcTimeout) {
-
-            }
-
-            @Override
-            public long getReadRpcTimeout(TimeUnit unit) {
-              return 0;
-            }
-
-            @Override
-            public int getReadRpcTimeout() {
-              return 0;
-            }
-
-            @Override
-            public void setReadRpcTimeout(int readRpcTimeout) {
-
-            }
-
-            @Override
-            public long getWriteRpcTimeout(TimeUnit unit) {
-              return 0;
-            }
-
-            @Override
-            public int getWriteRpcTimeout() {
-              return 0;
-            }
-
-            @Override
-            public void setWriteRpcTimeout(int writeRpcTimeout) {
-
-            }
-
-            @Override
-            public long getOperationTimeout(TimeUnit unit) {
-              return 0;
-            }
-
-            @Override
-            public int getOperationTimeout() {
-              return 0;
-            }
-
-            @Override
-            public void setOperationTimeout(int operationTimeout) {
-            }
-
-            @Override
-            public RegionLocator getRegionLocator() throws IOException {
-              return null;
-            }
-          };
+        public <T> CompletableFuture<List<T>> batchAll(List<? extends Row> actions) {
+          List<T> list = new ArrayList<>(actions.size());
+          for (Row action : actions) {
+            // Row is the index of the loop above where we make WALEntry and Cells.
+            int row = Bytes.toInt(action.getRow());
+            assertTrue("" + row, row > BOUNDARY);
+            UNFILTERED.incrementAndGet();
+            list.add(null);
+          }
+          return CompletableFuture.completedFuture(list);
         }
       };
     }
 
     @Override
-    public void clearRegionLocationCache() {
+    public Configuration getConfiguration() {
+      return conf;
     }
   }
 }
-
-
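
The stub above returns an already-completed future; as a general idiom, a minimal self-contained sketch of that pattern follows. The AsyncBatcher interface and names below are illustrative stand-ins, not part of the patch:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class CompletedFutureStubSketch {

  // Illustrative async batch interface, standing in for the mocked async table.
  public interface AsyncBatcher {
    <T> CompletableFuture<List<T>> batchAll(List<String> actions);
  }

  // A stub that "succeeds" immediately with one null result per action,
  // mirroring the shape of the batchAll override in the test above.
  static AsyncBatcher immediateSuccessStub() {
    return new AsyncBatcher() {
      @Override
      public <T> CompletableFuture<List<T>> batchAll(List<String> actions) {
        List<T> results = new ArrayList<>(actions.size());
        for (int i = 0; i < actions.size(); i++) {
          results.add(null); // callers only check completion, not values
        }
        return CompletableFuture.completedFuture(results);
      }
    };
  }
}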
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 523b82f..b9de705 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -124,7 +124,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
@@ -1111,14 +1111,8 @@ public class TestAccessController extends SecureTestUtil {
     }
 
     private void bulkLoadHFile(TableName tableName) throws Exception {
-      try (Connection conn = ConnectionFactory.createConnection(conf);
-          Admin admin = conn.getAdmin();
-          RegionLocator locator = conn.getRegionLocator(tableName);
-          Table table = conn.getTable(tableName)) {
-        TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
-        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
-        loader.doBulkLoad(loadPath, admin, table, locator);
-      }
+      TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
+      BulkLoadHFiles.create(conf).bulkLoad(tableName, loadPath);
     }
 
     private static void setPermission(FileSystem fs, Path dir, FsPermission perm)
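
For reference, a minimal sketch of driving the BulkLoadHFiles entry point that this hunk switches to; the configuration, table name, directory and class name below are illustrative assumptions, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The loader expects the layout <dir>/<family>/<hfile>; path and table are illustrative.
    Path hfileDir = new Path("/tmp/bulkload/mytable");
    TableName table = TableName.valueOf("mytable");
    // One call replaces the old Admin/Table/RegionLocator plumbing of doBulkLoad.
    BulkLoadHFiles.create(conf).bulkLoad(table, hfileDir);
  }
}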
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java
similarity index 83%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java
index 7c04edc..e85fc1a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.tool;
 
+import static org.apache.hadoop.hbase.HBaseTestingUtility.countRows;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -72,11 +73,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  * faster than the full MR cluster tests in TestHFileOutputFormat
  */
 @Category({ MiscTests.class, LargeTests.class })
-public class TestLoadIncrementalHFiles {
+public class TestBulkLoadHFiles {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestLoadIncrementalHFiles.class);
+    HBaseClassTestRule.forClass(TestBulkLoadHFiles.class);
 
   @Rule
   public TestName tn = new TestName();
@@ -89,14 +90,14 @@ public class TestLoadIncrementalHFiles {
   static final int MAX_FILES_PER_REGION_PER_FAMILY = 4;
 
   private static final byte[][] SPLIT_KEYS =
-      new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ppp") };
+    new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ppp") };
 
   static HBaseTestingUtility util = new HBaseTestingUtility();
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
-    util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
+    util.getConfiguration().setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
       MAX_FILES_PER_REGION_PER_FAMILY);
     // change default behavior so that tag values are returned with normal rpcs
     util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
@@ -119,7 +120,7 @@ public class TestLoadIncrementalHFiles {
   public void testSimpleLoadWithMap() throws Exception {
     runTest("testSimpleLoadWithMap", BloomType.NONE,
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
-          new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, },
+        new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, },
       true);
   }
 
@@ -130,16 +131,16 @@ public class TestLoadIncrementalHFiles {
   public void testSimpleLoad() throws Exception {
     runTest("testSimpleLoad", BloomType.NONE,
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
-          new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, });
+        new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, });
   }
 
   @Test
   public void testSimpleLoadWithFileCopy() throws Exception {
     String testName = tn.getMethodName();
     final byte[] TABLE_NAME = Bytes.toBytes("mytable_" + testName);
-    runTest(testName, buildHTD(TableName.valueOf(TABLE_NAME), BloomType.NONE),
-        false, null, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
-          new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, },
+    runTest(testName, buildHTD(TableName.valueOf(TABLE_NAME), BloomType.NONE), false, null,
+      new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
+        new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, },
       false, true, 2);
   }
 
@@ -150,7 +151,7 @@ public class TestLoadIncrementalHFiles {
   public void testRegionCrossingLoad() throws Exception {
     runTest("testRegionCrossingLoad", BloomType.NONE,
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
-          new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
+        new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
   }
 
   /**
@@ -160,7 +161,7 @@ public class TestLoadIncrementalHFiles {
   public void testRegionCrossingRowBloom() throws Exception {
     runTest("testRegionCrossingLoadRowBloom", BloomType.ROW,
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
-          new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
+        new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
   }
 
   /**
@@ -170,7 +171,7 @@ public class TestLoadIncrementalHFiles {
   public void testRegionCrossingRowColBloom() throws Exception {
     runTest("testRegionCrossingLoadRowColBloom", BloomType.ROWCOL,
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
-          new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
+        new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
   }
 
   /**
@@ -181,9 +182,9 @@ public class TestLoadIncrementalHFiles {
   public void testSimpleHFileSplit() throws Exception {
     runTest("testHFileSplit", BloomType.NONE,
       new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"),
-          Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), },
+        Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), },
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("lll") },
-          new byte[][] { Bytes.toBytes("mmm"), Bytes.toBytes("zzz") }, });
+        new byte[][] { Bytes.toBytes("mmm"), Bytes.toBytes("zzz") }, });
   }
 
   /**
@@ -217,27 +218,27 @@ public class TestLoadIncrementalHFiles {
   public void testSplitALot() throws Exception {
     runTest("testSplitALot", BloomType.NONE,
       new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"),
-          Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), Bytes.toBytes("ggg"),
-          Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"),
-          Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
-          Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"),
-          Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), },
+        Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), Bytes.toBytes("ggg"),
+        Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"),
+        Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
+        Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"),
+        Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), },
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("zzz") }, });
   }
 
   private void testRegionCrossingHFileSplit(BloomType bloomType) throws Exception {
     runTest("testHFileSplit" + bloomType + "Bloom", bloomType,
       new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"),
-          Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), },
+        Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), },
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
-          new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
+        new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
   }
 
   private TableDescriptor buildHTD(TableName tableName, BloomType bloomType) {
     return TableDescriptorBuilder.newBuilder(tableName)
-        .setColumnFamily(
-          ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBloomFilterType(bloomType).build())
-        .build();
+      .setColumnFamily(
+        ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBloomFilterType(bloomType).build())
+      .build();
   }
 
   private void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges)
@@ -265,28 +266,24 @@ public class TestLoadIncrementalHFiles {
     runTest(testName, TABLE_WITHOUT_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges,
       useMap, 2);
 
-
-    /* Run the test bulkloading the table from a depth of 3
-      directory structure is now
-      baseDirectory
-          -- regionDir
-            -- familyDir
-              -- storeFileDir
-    */
+    /*
+     * Run the test bulk loading the table from a depth of 3; the directory structure is now
+     * baseDirectory -- regionDir -- familyDir -- storeFileDir
+     */
     if (preCreateTable) {
-      runTest(testName + 2, TABLE_WITHOUT_NS, bloomType, true, tableSplitKeys, hfileRanges,
-          false, 3);
+      runTest(testName + 2, TABLE_WITHOUT_NS, bloomType, true, tableSplitKeys, hfileRanges, false,
+        3);
     }
 
     // Run the test bulkloading the table to the specified namespace
     final TableName TABLE_WITH_NS = TableName.valueOf(Bytes.toBytes(NAMESPACE), TABLE_NAME);
-    runTest(testName, TABLE_WITH_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges,
-      useMap, 2);
+    runTest(testName, TABLE_WITH_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges, useMap,
+      2);
   }
 
   private void runTest(String testName, TableName tableName, BloomType bloomType,
-      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges,
-      boolean useMap, int depth) throws Exception {
+      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap,
+      int depth) throws Exception {
     TableDescriptor htd = buildHTD(tableName, bloomType);
     runTest(testName, htd, preCreateTable, tableSplitKeys, hfileRanges, useMap, false, depth);
   }
@@ -296,7 +293,7 @@ public class TestLoadIncrementalHFiles {
       byte[][][] hfileRanges, boolean useMap, boolean deleteFile, boolean copyFiles,
       int initRowCount, int factor) throws Exception {
     return loadHFiles(testName, htd, util, fam, qual, preCreateTable, tableSplitKeys, hfileRanges,
-        useMap, deleteFile, copyFiles, initRowCount, factor, 2);
+      useMap, deleteFile, copyFiles, initRowCount, factor, 2);
   }
 
   public static int loadHFiles(String testName, TableDescriptor htd, HBaseTestingUtility util,
@@ -343,7 +340,7 @@ public class TestLoadIncrementalHFiles {
 
     Configuration conf = util.getConfiguration();
     if (copyFiles) {
-      conf.setBoolean(LoadIncrementalHFiles.ALWAYS_COPY_FILES, true);
+      conf.setBoolean(BulkLoadHFiles.ALWAYS_COPY_FILES, true);
     }
     BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf);
     List<String> args = Lists.newArrayList(baseDirectory.toString(), tableName.toString());
@@ -374,26 +371,23 @@ public class TestLoadIncrementalHFiles {
       }
     }
 
-    Table table = util.getConnection().getTable(tableName);
-    try {
-      assertEquals(initRowCount + expectedRows, util.countRows(table));
-    } finally {
-      table.close();
+    try (Table table = util.getConnection().getTable(tableName)) {
+      assertEquals(initRowCount + expectedRows, countRows(table));
     }
 
     return expectedRows;
   }
 
-  private void runTest(String testName, TableDescriptor htd,
-      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap,
-      boolean copyFiles, int depth) throws Exception {
+  private void runTest(String testName, TableDescriptor htd, boolean preCreateTable,
+      byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap, boolean copyFiles, int depth)
+      throws Exception {
     loadHFiles(testName, htd, util, FAMILY, QUALIFIER, preCreateTable, tableSplitKeys, hfileRanges,
       useMap, true, copyFiles, 0, 1000, depth);
 
     final TableName tableName = htd.getTableName();
     // verify staging folder has been cleaned up
     Path stagingBasePath =
-        new Path(FSUtils.getRootDir(util.getConfiguration()), HConstants.BULKLOAD_STAGING_DIR_NAME);
+      new Path(FSUtils.getRootDir(util.getConfiguration()), HConstants.BULKLOAD_STAGING_DIR_NAME);
     FileSystem fs = util.getTestFileSystem();
     if (fs.exists(stagingBasePath)) {
       FileStatus[] files = fs.listStatus(stagingBasePath);
@@ -419,7 +413,7 @@ public class TestLoadIncrementalHFiles {
     Path familyDir = new Path(dir, Bytes.toString(FAMILY));
     // table has these split points
     byte[][] tableSplitKeys = new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"),
-        Bytes.toBytes("jjj"), Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), };
+      Bytes.toBytes("jjj"), Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), };
 
     // creating an hfile that has values that span the split points.
     byte[] from = Bytes.toBytes("ddd");
@@ -432,13 +426,11 @@ public class TestLoadIncrementalHFiles {
     TableDescriptor htd = buildHTD(tableName, BloomType.NONE);
     util.getAdmin().createTable(htd, tableSplitKeys);
 
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
-    String[] args = { dir.toString(), tableName.toString() };
-    loader.run(args);
+    BulkLoadHFiles.create(util.getConfiguration()).bulkLoad(tableName, dir);
 
     Table table = util.getConnection().getTable(tableName);
     try {
-      assertEquals(expectedRows, util.countRows(table));
+      assertEquals(expectedRows, countRows(table));
       HFileTestUtil.verifyTags(table);
     } finally {
       table.close();
@@ -454,16 +446,16 @@ public class TestLoadIncrementalHFiles {
   public void testNonexistentColumnFamilyLoad() throws Exception {
     String testName = tn.getMethodName();
     byte[][][] hFileRanges =
-        new byte[][][] { new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("ccc") },
-            new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, };
+      new byte[][][] { new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("ccc") },
+        new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, };
 
     byte[] TABLE = Bytes.toBytes("mytable_" + testName);
     // set real family name to upper case in purpose to simulate the case that
     // family name in HFiles is invalid
     TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE))
-        .setColumnFamily(ColumnFamilyDescriptorBuilder
-            .of(Bytes.toBytes(new String(FAMILY).toUpperCase(Locale.ROOT))))
-        .build();
+      .setColumnFamily(ColumnFamilyDescriptorBuilder
+        .of(Bytes.toBytes(new String(FAMILY).toUpperCase(Locale.ROOT))))
+      .build();
 
     try {
       runTest(testName, htd, true, SPLIT_KEYS, hFileRanges, false, false, 2);
@@ -474,7 +466,7 @@ public class TestLoadIncrementalHFiles {
       String errMsg = e.getMessage();
       assertTrue(
         "Incorrect exception message, expected message: [" + EXPECTED_MSG_FOR_NON_EXISTING_FAMILY +
-            "], current message: [" + errMsg + "]",
+          "], current message: [" + errMsg + "]",
         errMsg.contains(EXPECTED_MSG_FOR_NON_EXISTING_FAMILY));
     }
   }
@@ -517,10 +509,8 @@ public class TestLoadIncrementalHFiles {
       } else {
         table = util.getConnection().getTable(TableName.valueOf(tableName));
       }
-
-      final String[] args = { dir.toString(), tableName };
-      new LoadIncrementalHFiles(util.getConfiguration()).run(args);
-      assertEquals(500, util.countRows(table));
+      BulkLoadHFiles.create(util.getConfiguration()).bulkLoad(TableName.valueOf(tableName), dir);
+      assertEquals(500, countRows(table));
     } finally {
       if (table != null) {
         table.close();
@@ -560,7 +550,7 @@ public class TestLoadIncrementalHFiles {
     Path bottomOut = new Path(dir, "bottom.out");
     Path topOut = new Path(dir, "top.out");
 
-    LoadIncrementalHFiles.splitStoreFile(util.getConfiguration(), testIn, familyDesc,
+    BulkLoadHFilesTool.splitStoreFile(util.getConfiguration(), testIn, familyDesc,
       Bytes.toBytes("ggg"), bottomOut, topOut);
 
     int rowCount = verifyHFile(bottomOut);
@@ -594,14 +584,14 @@ public class TestLoadIncrementalHFiles {
     FileSystem fs = util.getTestFileSystem();
     Path testIn = new Path(dir, "testhfile");
     ColumnFamilyDescriptor familyDesc =
-        ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setDataBlockEncoding(cfEncoding).build();
+      ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setDataBlockEncoding(cfEncoding).build();
     HFileTestUtil.createHFileWithDataBlockEncoding(util.getConfiguration(), fs, testIn,
       bulkloadEncoding, FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
 
     Path bottomOut = new Path(dir, "bottom.out");
     Path topOut = new Path(dir, "top.out");
 
-    LoadIncrementalHFiles.splitStoreFile(util.getConfiguration(), testIn, familyDesc,
+    BulkLoadHFilesTool.splitStoreFile(util.getConfiguration(), testIn, familyDesc,
       Bytes.toBytes("ggg"), bottomOut, topOut);
 
     int rowCount = verifyHFile(bottomOut);
@@ -612,7 +602,7 @@ public class TestLoadIncrementalHFiles {
   private int verifyHFile(Path p) throws IOException {
     Configuration conf = util.getConfiguration();
     HFile.Reader reader =
-        HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf);
+      HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf);
     reader.loadFileInfo();
     HFileScanner scanner = reader.getScanner(false, false);
     scanner.seekTo();
@@ -682,7 +672,7 @@ public class TestLoadIncrementalHFiles {
     last = "w";
     addStartEndKeysForTest(map, Bytes.toBytes(first), Bytes.toBytes(last));
 
-    byte[][] keysArray = LoadIncrementalHFiles.inferBoundaries(map);
+    byte[][] keysArray = BulkLoadHFilesTool.inferBoundaries(map);
     byte[][] compare = new byte[3][];
     compare[0] = Bytes.toBytes("m");
     compare[1] = Bytes.toBytes("r");
@@ -709,22 +699,21 @@ public class TestLoadIncrementalHFiles {
         FAMILY, QUALIFIER, from, to, 1000);
     }
 
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
-    String[] args = { dir.toString(), "mytable_testLoadTooMayHFiles" };
     try {
-      loader.run(args);
+      BulkLoadHFiles.create(util.getConfiguration())
+        .bulkLoad(TableName.valueOf("mytable_testLoadTooMayHFiles"), dir);
       fail("Bulk loading too many files should fail");
     } catch (IOException ie) {
       assertTrue(ie.getMessage()
-          .contains("Trying to load more than " + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
+        .contains("Trying to load more than " + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
     }
   }
 
   @Test(expected = TableNotFoundException.class)
   public void testWithoutAnExistingTableAndCreateTableSetToNo() throws Exception {
     Configuration conf = util.getConfiguration();
-    conf.set(LoadIncrementalHFiles.CREATE_TABLE_CONF_KEY, "no");
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
+    conf.set(BulkLoadHFiles.CREATE_TABLE_CONF_KEY, "no");
+    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf);
     String[] args = { "directory", "nonExistingTable" };
     loader.run(args);
   }
@@ -741,19 +730,11 @@ public class TestLoadIncrementalHFiles {
     byte[] to = Bytes.toBytes("end");
     Configuration conf = util.getConfiguration();
     String tableName = tn.getMethodName();
-    Table table = util.createTable(TableName.valueOf(tableName), family);
-    HFileTestUtil.createHFile(conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family),
-      QUALIFIER, from, to, 1000);
-
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
-    String[] args = { dir.toString(), tableName };
-    try {
-      loader.run(args);
-      assertEquals(1000, util.countRows(table));
-    } finally {
-      if (null != table) {
-        table.close();
-      }
+    try (Table table = util.createTable(TableName.valueOf(tableName), family)) {
+      HFileTestUtil.createHFile(conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family),
+        QUALIFIER, from, to, 1000);
+      BulkLoadHFiles.create(conf).bulkLoad(table.getName(), dir);
+      assertEquals(1000, countRows(table));
     }
   }
 }
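
The hfileRanges arguments reformatted throughout this file are triple-nested byte arrays in which each inner pair is one HFile's first and last row key; a minimal sketch of that shape, with illustrative keys:

import org.apache.hadoop.hbase.util.Bytes;

public class HFileRangesSketch {
  public static void main(String[] args) {
    // Each element is one HFile's { firstRowKey, lastRowKey } pair, as in the tests above.
    byte[][][] hfileRanges = new byte[][][] {
      new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
      new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
    };
    for (byte[][] range : hfileRanges) {
      System.out.println(Bytes.toString(range[0]) + " .. " + Bytes.toString(range[1]));
    }
  }
}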
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java
new file mode 100644
index 0000000..2aef16e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java
@@ -0,0 +1,486 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.tool;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Deque;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
+/**
+ * Test cases for the atomic load error handling of the bulk load functionality.
+ */
+@Category({ MiscTests.class, LargeTests.class })
+public class TestBulkLoadHFilesSplitRecovery {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestBulkLoadHFilesSplitRecovery.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestHRegionServerBulkLoad.class);
+
+  static HBaseTestingUtility util;
+  // used by secure subclass
+  static boolean useSecure = false;
+
+  final static int NUM_CFS = 10;
+  final static byte[] QUAL = Bytes.toBytes("qual");
+  final static int ROWCOUNT = 100;
+
+  private final static byte[][] families = new byte[NUM_CFS][];
+
+  @Rule
+  public TestName name = new TestName();
+
+  static {
+    for (int i = 0; i < NUM_CFS; i++) {
+      families[i] = Bytes.toBytes(family(i));
+    }
+  }
+
+  static byte[] rowkey(int i) {
+    return Bytes.toBytes(String.format("row_%08d", i));
+  }
+
+  static String family(int i) {
+    return String.format("family_%04d", i);
+  }
+
+  static byte[] value(int i) {
+    return Bytes.toBytes(String.format("%010d", i));
+  }
+
+  public static void buildHFiles(FileSystem fs, Path dir, int value) throws IOException {
+    byte[] val = value(value);
+    for (int i = 0; i < NUM_CFS; i++) {
+      Path testIn = new Path(dir, family(i));
+
+      TestHRegionServerBulkLoad.createHFile(fs, new Path(testIn, "hfile_" + i),
+        Bytes.toBytes(family(i)), QUAL, val, ROWCOUNT);
+    }
+  }
+
+  private TableDescriptor createTableDesc(TableName name, int cfs) {
+    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
+    IntStream.range(0, cfs).mapToObj(i -> ColumnFamilyDescriptorBuilder.of(family(i)))
+      .forEachOrdered(builder::setColumnFamily);
+    return builder.build();
+  }
+
+  /**
+   * Creates a table with the given table name and the specified number of column families if the
+   * table does not already exist.
+   */
+  private void setupTable(final Connection connection, TableName table, int cfs)
+      throws IOException {
+    try {
+      LOG.info("Creating table " + table);
+      try (Admin admin = connection.getAdmin()) {
+        admin.createTable(createTableDesc(table, cfs));
+      }
+    } catch (TableExistsException tee) {
+      LOG.info("Table " + table + " already exists");
+    }
+  }
+
+  /**
+   * Creates a table with the given table name, the specified number of column families and the
+   * given split keys if the table does not already exist.
+   * @param table the table to create
+   * @param cfs number of column families
+   * @param SPLIT_KEYS keys to pre-split the table with
+   */
+  private void setupTableWithSplitkeys(TableName table, int cfs, byte[][] SPLIT_KEYS)
+      throws IOException {
+    try {
+      LOG.info("Creating table " + table);
+      util.createTable(createTableDesc(table, cfs), SPLIT_KEYS);
+    } catch (TableExistsException tee) {
+      LOG.info("Table " + table + " already exists");
+    }
+  }
+
+  private Path buildBulkFiles(TableName table, int value) throws Exception {
+    Path dir = util.getDataTestDirOnTestFS(table.getNameAsString());
+    Path bulk1 = new Path(dir, table.getNameAsString() + value);
+    FileSystem fs = util.getTestFileSystem();
+    buildHFiles(fs, bulk1, value);
+    return bulk1;
+  }
+
+  /**
+   * Populate table with known values.
+   */
+  private void populateTable(final Connection connection, TableName table, int value)
+      throws Exception {
+    // create HFiles for different column families
+    Path dir = buildBulkFiles(table, value);
+    BulkLoadHFiles.create(util.getConfiguration()).bulkLoad(table, dir);
+  }
+
+  /**
+   * Split the known table in half. (this is hard coded for this test suite)
+   */
+  private void forceSplit(TableName table) {
+    try {
+      // We would call the region server to make this synchronous, but that API isn't visible.
+      HRegionServer hrs = util.getRSForFirstRegionInTable(table);
+
+      for (RegionInfo hri : ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
+        if (hri.getTable().equals(table)) {
+          util.getAdmin().splitRegionAsync(hri.getRegionName(), rowkey(ROWCOUNT / 2));
+          // ProtobufUtil.split(null, hrs.getRSRpcServices(), hri, rowkey(ROWCOUNT / 2));
+        }
+      }
+
+      // verify that split completed.
+      int regions;
+      do {
+        regions = 0;
+        for (RegionInfo hri : ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
+          if (hri.getTable().equals(table)) {
+            regions++;
+          }
+        }
+        if (regions != 2) {
+          LOG.info("Taking some time to complete split...");
+          Thread.sleep(250);
+        }
+      } while (regions != 2);
+    } catch (IOException e) {
+      e.printStackTrace();
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    util = new HBaseTestingUtility();
+    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
+    util.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws Exception {
+    util.shutdownMiniCluster();
+  }
+
+  /**
+   * Checks that all columns have the expected value and that there is the expected number of rows.
+   * @throws IOException
+   */
+  void assertExpectedTable(TableName table, int count, int value) throws IOException {
+    TableDescriptor htd = util.getAdmin().getDescriptor(table);
+    assertNotNull(htd);
+    try (Table t = util.getConnection().getTable(table);
+        ResultScanner sr = t.getScanner(new Scan())) {
+      int i = 0;
+      for (Result r; (r = sr.next()) != null;) {
+        r.getNoVersionMap().values().stream().flatMap(m -> m.values().stream())
+          .forEach(v -> assertArrayEquals(value(value), v));
+        i++;
+      }
+      assertEquals(count, i);
+    } catch (IOException e) {
+      fail("Failed due to exception");
+    }
+  }
+
+  private static <T> CompletableFuture<T> failedFuture(Throwable error) {
+    CompletableFuture<T> future = new CompletableFuture<>();
+    future.completeExceptionally(error);
+    return future;
+  }
+
+  private static AsyncClusterConnection mockAndInjectError(AsyncClusterConnection conn) {
+    AsyncClusterConnection errConn = spy(conn);
+    doReturn(failedFuture(new IOException("injecting bulk load error"))).when(errConn)
+      .bulkLoad(any(), anyList(), any(), anyBoolean(), any(), any(), anyBoolean());
+    return errConn;
+  }
+
+  /**
+   * Test that shows that an exception thrown from the RS side will result in an exception on the
+   * bulk load client.
+   */
+  @Test(expected = IOException.class)
+  public void testBulkLoadPhaseFailure() throws Exception {
+    final TableName table = TableName.valueOf(name.getMethodName());
+    final AtomicInteger attemptedCalls = new AtomicInteger();
+    Configuration conf = new Configuration(util.getConfiguration());
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
+    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf) {
+
+      @Override
+      protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
+          Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
+          boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
+        AsyncClusterConnection c =
+          attemptedCalls.incrementAndGet() == 1 ? mockAndInjectError(conn) : conn;
+        super.bulkLoadPhase(c, tableName, queue, regionGroups, copyFiles, item2RegionMap);
+      }
+    };
+    Path dir = buildBulkFiles(table, 1);
+    loader.bulkLoad(table, dir);
+  }
+
+  /**
+   * Test that shows that an exception thrown from the RS side will result in the expected number
+   * of retries set by {@link HConstants#HBASE_CLIENT_RETRIES_NUMBER} when
+   * {@link BulkLoadHFiles#RETRY_ON_IO_EXCEPTION} is set.
+   */
+  @Test
+  public void testRetryOnIOException() throws Exception {
+    TableName table = TableName.valueOf(name.getMethodName());
+    AtomicInteger calls = new AtomicInteger(0);
+    setupTable(util.getConnection(), table, 10);
+    Configuration conf = new Configuration(util.getConfiguration());
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
+    conf.setBoolean(BulkLoadHFiles.RETRY_ON_IO_EXCEPTION, true);
+    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf) {
+
+      @Override
+      protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
+          Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
+          boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
+        if (calls.get() < conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+          HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER)) {
+          calls.incrementAndGet();
+          super.bulkLoadPhase(mockAndInjectError(conn), tableName, queue, regionGroups, copyFiles,
+            item2RegionMap);
+        } else {
+          super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
+        }
+      }
+    };
+    Path dir = buildBulkFiles(table, 1);
+    loader.bulkLoad(table, dir);
+    assertEquals(calls.get(), 2);
+  }
+
+  /**
+   * This test exercises the path where there is a split after initial validation but before the
+   * atomic bulk load call. We cannot use presplitting to test this path, so we actually inject a
+   * split just before the atomic region load.
+   */
+  @Test
+  public void testSplitWhileBulkLoadPhase() throws Exception {
+    final TableName table = TableName.valueOf(name.getMethodName());
+    setupTable(util.getConnection(), table, 10);
+    populateTable(util.getConnection(), table, 1);
+    assertExpectedTable(table, ROWCOUNT, 1);
+
+    // Now let's cause trouble. This will occur after checks and cause bulk
+    // files to fail when we attempt to atomically import them. This is recoverable.
+    final AtomicInteger attemptedCalls = new AtomicInteger();
+    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {
+
+      @Override
+      protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
+          Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
+          boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
+        int i = attemptedCalls.incrementAndGet();
+        if (i == 1) {
+          // On first attempt force a split.
+          forceSplit(table);
+        }
+        super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
+      }
+    };
+
+    // create HFiles for different column families
+    Path dir = buildBulkFiles(table, 2);
+    loader.bulkLoad(table, dir);
+
+    // check that data was loaded
+    // The three expected attempts are 1) the initial failure because a split is needed,
+    // 2) load of the split top half, and 3) load of the split bottom half
+    assertEquals(3, attemptedCalls.get());
+    assertExpectedTable(table, ROWCOUNT, 2);
+  }
+
+  /**
+   * This test splits a table and attempts to bulk load. The bulk import files should be split
+   * before atomically importing.
+   */
+  @Test
+  public void testGroupOrSplitPresplit() throws Exception {
+    final TableName table = TableName.valueOf(name.getMethodName());
+    setupTable(util.getConnection(), table, 10);
+    populateTable(util.getConnection(), table, 1);
+    assertExpectedTable(util.getConnection(), table, ROWCOUNT, 1);
+    forceSplit(table);
+
+    final AtomicInteger countedLqis = new AtomicInteger();
+    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {
+
+      @Override
+      protected Pair<List<LoadQueueItem>, String> groupOrSplit(AsyncClusterConnection conn,
+          TableName tableName, Multimap<ByteBuffer, LoadQueueItem> regionGroups, LoadQueueItem item,
+          List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
+        Pair<List<LoadQueueItem>, String> lqis =
+          super.groupOrSplit(conn, tableName, regionGroups, item, startEndKeys);
+        if (lqis != null && lqis.getFirst() != null) {
+          countedLqis.addAndGet(lqis.getFirst().size());
+        }
+        return lqis;
+      }
+    };
+
+    // create HFiles for different column families
+    Path dir = buildBulkFiles(table, 2);
+    loader.bulkLoad(table, dir);
+    assertExpectedTable(util.getConnection(), table, ROWCOUNT, 2);
+    assertEquals(20, countedLqis.get());
+  }
+
+  /**
+   * This test creates a table with many small regions. The bulk load files will be split
+   * multiple times before all of them can be loaded successfully.
+   */
+  @Test
+  public void testSplitTmpFileCleanUp() throws Exception {
+    final TableName table = TableName.valueOf(name.getMethodName());
+    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"),
+      Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"), Bytes.toBytes("row_00000040"),
+      Bytes.toBytes("row_00000050") };
+    setupTableWithSplitkeys(table, 10, SPLIT_KEYS);
+
+    BulkLoadHFiles loader = BulkLoadHFiles.create(util.getConfiguration());
+
+    // create HFiles
+    Path dir = buildBulkFiles(table, 2);
+    loader.bulkLoad(table, dir);
+    // family path
+    Path tmpPath = new Path(dir, family(0));
+    // TMP_DIR under family path
+    tmpPath = new Path(tmpPath, BulkLoadHFilesTool.TMP_DIR);
+    FileSystem fs = dir.getFileSystem(util.getConfiguration());
+    // HFiles have been split, so TMP_DIR exists
+    assertTrue(fs.exists(tmpPath));
+    // TMP_DIR should have been cleaned-up
+    assertNull(BulkLoadHFilesTool.TMP_DIR + " should be empty.", FSUtils.listStatus(fs, tmpPath));
+    assertExpectedTable(util.getConnection(), table, ROWCOUNT, 2);
+  }
+
+  /**
+   * This simulates a remote exception which should cause the bulk load to exit with an exception.
+   */
+  @Test(expected = IOException.class)
+  public void testGroupOrSplitFailure() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+    setupTable(util.getConnection(), tableName, 10);
+    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {
+
+      private int i = 0;
+
+      @Override
+      protected Pair<List<LoadQueueItem>, String> groupOrSplit(AsyncClusterConnection conn,
+          TableName tableName, Multimap<ByteBuffer, LoadQueueItem> regionGroups, LoadQueueItem item,
+          List<Pair<byte[], byte[]>> startEndKeys) throws IOException {
+        i++;
+
+        if (i == 5) {
+          throw new IOException("failure");
+        }
+        return super.groupOrSplit(conn, tableName, regionGroups, item, startEndKeys);
+      }
+    };
+
+    // create HFiles for different column families
+    Path dir = buildBulkFiles(tableName, 1);
+    loader.bulkLoad(tableName, dir);
+  }
+
+  /**
+   * Checks that all columns have the expected value and that there is the expected number of rows.
+   */
+  void assertExpectedTable(final Connection connection, TableName table, int count, int value)
+      throws IOException {
+    TableDescriptor htd = util.getAdmin().getDescriptor(table);
+    assertNotNull(htd);
+    try (Table t = connection.getTable(table); ResultScanner sr = t.getScanner(new Scan())) {
+      int i = 0;
+      for (Result r; (r = sr.next()) != null;) {
+        r.getNoVersionMap().values().stream().flatMap(m -> m.values().stream())
+          .forEach(v -> assertArrayEquals(value(value), v));
+        i++;
+      }
+      assertEquals(count, i);
+    } catch (IOException e) {
+      fail("Failed due to exception");
+    }
+  }
+}
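
The error-injection helper above pairs a Mockito spy with an already-failed CompletableFuture; a minimal stand-alone sketch of that pattern, where the Service interface and its call method are illustrative stand-ins for AsyncClusterConnection.bulkLoad rather than real HBase API:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

public class FailedFutureSpySketch {

  // Illustrative async interface, standing in for AsyncClusterConnection.
  public interface Service {
    CompletableFuture<String> call(String request);
  }

  // Same shape as the failedFuture(...) helper in the test above.
  static <T> CompletableFuture<T> failedFuture(Throwable error) {
    CompletableFuture<T> future = new CompletableFuture<>();
    future.completeExceptionally(error);
    return future;
  }

  // Wraps a real implementation so every call completes exceptionally with an injected error
  // instead of reaching the real service; doReturn(...).when(...) avoids invoking the real method.
  static Service withInjectedError(Service real) {
    Service errSpy = spy(real);
    doReturn(failedFuture(new IOException("injected bulk load error"))).when(errSpy).call(any());
    return errSpy;
  }
}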
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
deleted file mode 100644
index fcc1bb8..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
+++ /dev/null
@@ -1,630 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.tool;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Collection;
-import java.util.Deque;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.IntStream;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClientServiceCallable;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.log.HBaseMarkers;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-
-/**
- * Test cases for the atomic load error handling of the bulk load functionality.
- */
-@Category({ MiscTests.class, LargeTests.class })
-public class TestLoadIncrementalHFilesSplitRecovery {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestLoadIncrementalHFilesSplitRecovery.class);
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestHRegionServerBulkLoad.class);
-
-  static HBaseTestingUtility util;
-  // used by secure subclass
-  static boolean useSecure = false;
-
-  final static int NUM_CFS = 10;
-  final static byte[] QUAL = Bytes.toBytes("qual");
-  final static int ROWCOUNT = 100;
-
-  private final static byte[][] families = new byte[NUM_CFS][];
-
-  @Rule
-  public TestName name = new TestName();
-
-  static {
-    for (int i = 0; i < NUM_CFS; i++) {
-      families[i] = Bytes.toBytes(family(i));
-    }
-  }
-
-  static byte[] rowkey(int i) {
-    return Bytes.toBytes(String.format("row_%08d", i));
-  }
-
-  static String family(int i) {
-    return String.format("family_%04d", i);
-  }
-
-  static byte[] value(int i) {
-    return Bytes.toBytes(String.format("%010d", i));
-  }
-
-  public static void buildHFiles(FileSystem fs, Path dir, int value) throws IOException {
-    byte[] val = value(value);
-    for (int i = 0; i < NUM_CFS; i++) {
-      Path testIn = new Path(dir, family(i));
-
-      TestHRegionServerBulkLoad.createHFile(fs, new Path(testIn, "hfile_" + i),
-        Bytes.toBytes(family(i)), QUAL, val, ROWCOUNT);
-    }
-  }
-
-  private TableDescriptor createTableDesc(TableName name, int cfs) {
-    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
-    IntStream.range(0, cfs).mapToObj(i -> ColumnFamilyDescriptorBuilder.of(family(i)))
-        .forEachOrdered(builder::setColumnFamily);
-    return builder.build();
-  }
-
-  /**
-   * Creates a table with given table name and specified number of column families if the table does
-   * not already exist.
-   */
-  private void setupTable(final Connection connection, TableName table, int cfs)
-      throws IOException {
-    try {
-      LOG.info("Creating table " + table);
-      try (Admin admin = connection.getAdmin()) {
-        admin.createTable(createTableDesc(table, cfs));
-      }
-    } catch (TableExistsException tee) {
-      LOG.info("Table " + table + " already exists");
-    }
-  }
-
-  /**
-   * Creates a table with given table name,specified number of column families<br>
-   * and splitkeys if the table does not already exist.
-   * @param table
-   * @param cfs
-   * @param SPLIT_KEYS
-   */
-  private void setupTableWithSplitkeys(TableName table, int cfs, byte[][] SPLIT_KEYS)
-      throws IOException {
-    try {
-      LOG.info("Creating table " + table);
-      util.createTable(createTableDesc(table, cfs), SPLIT_KEYS);
-    } catch (TableExistsException tee) {
-      LOG.info("Table " + table + " already exists");
-    }
-  }
-
-  private Path buildBulkFiles(TableName table, int value) throws Exception {
-    Path dir = util.getDataTestDirOnTestFS(table.getNameAsString());
-    Path bulk1 = new Path(dir, table.getNameAsString() + value);
-    FileSystem fs = util.getTestFileSystem();
-    buildHFiles(fs, bulk1, value);
-    return bulk1;
-  }
-
-  /**
-   * Populate table with known values.
-   */
-  private void populateTable(final Connection connection, TableName table, int value)
-      throws Exception {
-    // create HFiles for different column families
-    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
-    Path bulk1 = buildBulkFiles(table, value);
-    try (Table t = connection.getTable(table);
-        RegionLocator locator = connection.getRegionLocator(table);
-        Admin admin = connection.getAdmin()) {
-      lih.doBulkLoad(bulk1, admin, t, locator);
-    }
-  }
-
-  /**
-   * Split the known table in half. (this is hard coded for this test suite)
-   */
-  private void forceSplit(TableName table) {
-    try {
-      // need to call regions server to by synchronous but isn't visible.
-      HRegionServer hrs = util.getRSForFirstRegionInTable(table);
-
-      for (RegionInfo hri : ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
-        if (hri.getTable().equals(table)) {
-          util.getAdmin().splitRegionAsync(hri.getRegionName(), rowkey(ROWCOUNT / 2));
-          // ProtobufUtil.split(null, hrs.getRSRpcServices(), hri, rowkey(ROWCOUNT / 2));
-        }
-      }
-
-      // verify that split completed.
-      int regions;
-      do {
-        regions = 0;
-        for (RegionInfo hri : ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
-          if (hri.getTable().equals(table)) {
-            regions++;
-          }
-        }
-        if (regions != 2) {
-          LOG.info("Taking some time to complete split...");
-          Thread.sleep(250);
-        }
-      } while (regions != 2);
-    } catch (IOException e) {
-      e.printStackTrace();
-    } catch (InterruptedException e) {
-      e.printStackTrace();
-    }
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    util = new HBaseTestingUtility();
-    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
-    util.startMiniCluster(1);
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws Exception {
-    util.shutdownMiniCluster();
-  }
-
-  /**
-   * Checks that all columns have the expected value and that there is the expected number of rows.
-   * @throws IOException
-   */
-  void assertExpectedTable(TableName table, int count, int value) throws IOException {
-    TableDescriptor htd = util.getAdmin().getDescriptor(table);
-    assertNotNull(htd);
-    try (Table t = util.getConnection().getTable(table);
-        ResultScanner sr = t.getScanner(new Scan())) {
-      int i = 0;
-      for (Result r; (r = sr.next()) != null;) {
-        r.getNoVersionMap().values().stream().flatMap(m -> m.values().stream())
-            .forEach(v -> assertArrayEquals(value(value), v));
-        i++;
-      }
-      assertEquals(count, i);
-    } catch (IOException e) {
-      fail("Failed due to exception");
-    }
-  }
-
-  /**
-   * Test that shows that exception thrown from the RS side will result in an exception on the
-   * LIHFile client.
-   */
-  @Test(expected = IOException.class)
-  public void testBulkLoadPhaseFailure() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    final AtomicInteger attmptedCalls = new AtomicInteger();
-    final AtomicInteger failedCalls = new AtomicInteger();
-    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTable(connection, table, 10);
-      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
-        @Override
-        protected List<LoadQueueItem> tryAtomicRegionLoad(
-            ClientServiceCallable<byte[]> serviceCallable, TableName tableName, final byte[] first,
-            Collection<LoadQueueItem> lqis) throws IOException {
-          int i = attmptedCalls.incrementAndGet();
-          if (i == 1) {
-            Connection errConn;
-            try {
-              errConn = getMockedConnection(util.getConfiguration());
-              serviceCallable = this.buildClientServiceCallable(errConn, table, first, lqis, true);
-            } catch (Exception e) {
-              LOG.error(HBaseMarkers.FATAL, "mocking cruft, should never happen", e);
-              throw new RuntimeException("mocking cruft, should never happen");
-            }
-            failedCalls.incrementAndGet();
-            return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
-          }
-
-          return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
-        }
-      };
-      try {
-        // create HFiles for different column families
-        Path dir = buildBulkFiles(table, 1);
-        try (Table t = connection.getTable(table);
-            RegionLocator locator = connection.getRegionLocator(table);
-            Admin admin = connection.getAdmin()) {
-          lih.doBulkLoad(dir, admin, t, locator);
-        }
-      } finally {
-        util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-          HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-      }
-      fail("doBulkLoad should have thrown an exception");
-    }
-  }
-
-  /**
-   * Test that shows that an exception thrown from the RS side will result in the expected number of
-   * retries set by {@link HConstants#HBASE_CLIENT_RETRIES_NUMBER} when
-   * {@link LoadIncrementalHFiles#RETRY_ON_IO_EXCEPTION} is set
-   */
-  @Test
-  public void testRetryOnIOException() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    final AtomicInteger calls = new AtomicInteger(0);
-    final Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
-    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
-    util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, true);
-    final LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
-      @Override
-      protected List<LoadQueueItem> tryAtomicRegionLoad(
-          ClientServiceCallable<byte[]> serverCallable, TableName tableName, final byte[] first,
-          Collection<LoadQueueItem> lqis) throws IOException {
-        if (calls.get() < util.getConfiguration().getInt(
-          HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER)) {
-          ClientServiceCallable<byte[]> newServerCallable = new ClientServiceCallable<byte[]>(conn,
-              tableName, first, new RpcControllerFactory(util.getConfiguration()).newController(),
-              HConstants.PRIORITY_UNSET) {
-            @Override
-            public byte[] rpcCall() throws Exception {
-              throw new IOException("Error calling something on RegionServer");
-            }
-          };
-          calls.getAndIncrement();
-          return super.tryAtomicRegionLoad(newServerCallable, tableName, first, lqis);
-        } else {
-          return super.tryAtomicRegionLoad(serverCallable, tableName, first, lqis);
-        }
-      }
-    };
-    setupTable(conn, table, 10);
-    Path dir = buildBulkFiles(table, 1);
-    lih.doBulkLoad(dir, conn.getAdmin(), conn.getTable(table), conn.getRegionLocator(table));
-    assertEquals(calls.get(), 2);
-    util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, false);
-  }
-
-  private ClusterConnection getMockedConnection(final Configuration conf)
-      throws IOException, org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
-    ServerName sn = ServerName.valueOf("example.org", 1234, 0);
-    RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
-    ClientProtos.ClientService.BlockingInterface client =
-      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
-    Mockito
-      .when(
-        client.bulkLoadHFile((RpcController) Mockito.any(), (BulkLoadHFileRequest) Mockito.any()))
-      .thenThrow(new ServiceException(new IOException("injecting bulk load error")));
-    return HConnectionTestingUtility.getMockedConnectionAndDecorate(conf, null, client, sn, hri);
-  }
-
-  /**
-   * This test exercises the path where there is a split after initial validation but before the
-   * atomic bulk load call. We cannot use presplitting to test this path, so we actually inject a
-   * split just before the atomic region load.
-   */
-  @Test
-  public void testSplitWhileBulkLoadPhase() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTable(connection, table, 10);
-      populateTable(connection, table, 1);
-      assertExpectedTable(table, ROWCOUNT, 1);
-
-      // Now let's cause trouble. This will occur after checks and cause bulk
-      // files to fail when they attempt to atomically import. This is recoverable.
-      final AtomicInteger attemptedCalls = new AtomicInteger();
-      LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {
-        @Override
-        protected void bulkLoadPhase(final Table htable, final Connection conn,
-            ExecutorService pool, Deque<LoadQueueItem> queue,
-            final Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFile,
-            Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
-          int i = attemptedCalls.incrementAndGet();
-          if (i == 1) {
-            // On first attempt force a split.
-            forceSplit(table);
-          }
-          super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
-        }
-      };
-
-      // create HFiles for different column families
-      try (Table t = connection.getTable(table);
-          RegionLocator locator = connection.getRegionLocator(table);
-          Admin admin = connection.getAdmin()) {
-        Path bulk = buildBulkFiles(table, 2);
-        lih2.doBulkLoad(bulk, admin, t, locator);
-      }
-
-      // check that data was loaded
-      // The three expected attempts are 1) failure because need to split, 2)
-      // load of split top 3) load of split bottom
-      assertEquals(3, attemptedCalls.get());
-      assertExpectedTable(table, ROWCOUNT, 2);
-    }
-  }
-
-  /**
-   * This test splits a table and attempts to bulk load. The bulk import files should be split
-   * before atomically importing.
-   */
-  @Test
-  public void testGroupOrSplitPresplit() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTable(connection, table, 10);
-      populateTable(connection, table, 1);
-      assertExpectedTable(connection, table, ROWCOUNT, 1);
-      forceSplit(table);
-
-      final AtomicInteger countedLqis = new AtomicInteger();
-      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
-        @Override
-        protected Pair<List<LoadQueueItem>, String> groupOrSplit(
-            Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item,
-            final Table htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-          Pair<List<LoadQueueItem>, String> lqis =
-              super.groupOrSplit(regionGroups, item, htable, startEndKeys);
-          if (lqis != null && lqis.getFirst() != null) {
-            countedLqis.addAndGet(lqis.getFirst().size());
-          }
-          return lqis;
-        }
-      };
-
-      // create HFiles for different column families
-      Path bulk = buildBulkFiles(table, 2);
-      try (Table t = connection.getTable(table);
-          RegionLocator locator = connection.getRegionLocator(table);
-          Admin admin = connection.getAdmin()) {
-        lih.doBulkLoad(bulk, admin, t, locator);
-      }
-      assertExpectedTable(connection, table, ROWCOUNT, 2);
-      assertEquals(20, countedLqis.get());
-    }
-  }
-
-  /**
-   * This test creates a table with many small regions. The bulk load files would be split
-   * multiple times before all of them can be loaded successfully.
-   */
-  @Test
-  public void testSplitTmpFileCleanUp() throws Exception {
-    final TableName table = TableName.valueOf(name.getMethodName());
-    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"),
-        Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"), Bytes.toBytes("row_00000040"),
-        Bytes.toBytes("row_00000050") };
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTableWithSplitkeys(table, 10, SPLIT_KEYS);
-
-      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
-
-      // create HFiles
-      Path bulk = buildBulkFiles(table, 2);
-      try (Table t = connection.getTable(table);
-          RegionLocator locator = connection.getRegionLocator(table);
-          Admin admin = connection.getAdmin()) {
-        lih.doBulkLoad(bulk, admin, t, locator);
-      }
-      // family path
-      Path tmpPath = new Path(bulk, family(0));
-      // TMP_DIR under family path
-      tmpPath = new Path(tmpPath, LoadIncrementalHFiles.TMP_DIR);
-      FileSystem fs = bulk.getFileSystem(util.getConfiguration());
-      // HFiles have been split, so there is a TMP_DIR
-      assertTrue(fs.exists(tmpPath));
-      // TMP_DIR should have been cleaned-up
-      assertNull(LoadIncrementalHFiles.TMP_DIR + " should be empty.",
-        FSUtils.listStatus(fs, tmpPath));
-      assertExpectedTable(connection, table, ROWCOUNT, 2);
-    }
-  }
-
-  /**
-   * This simulates a remote exception which should cause LIHF to exit with an exception.
-   */
-  @Test(expected = IOException.class)
-  public void testGroupOrSplitFailure() throws Exception {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
-      setupTable(connection, tableName, 10);
-
-      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
-        int i = 0;
-
-        @Override
-        protected Pair<List<LoadQueueItem>, String> groupOrSplit(
-            Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item,
-            final Table table, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-          i++;
-
-          if (i == 5) {
-            throw new IOException("failure");
-          }
-          return super.groupOrSplit(regionGroups, item, table, startEndKeys);
-        }
-      };
-
-      // create HFiles for different column families
-      Path dir = buildBulkFiles(tableName, 1);
-      try (Table t = connection.getTable(tableName);
-          RegionLocator locator = connection.getRegionLocator(tableName);
-          Admin admin = connection.getAdmin()) {
-        lih.doBulkLoad(dir, admin, t, locator);
-      }
-    }
-
-    fail("doBulkLoad should have thrown an exception");
-  }
-
-  @Test
-  public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000100") };
-    // Share connection. We were failing to find the table with our new reverse scan because it
-    // looks for first region, not any region -- that is how it works now. The below removes first
-    // region in test. Was reliant on the Connection caching having first region.
-    Connection connection = ConnectionFactory.createConnection(util.getConfiguration());
-    Table table = connection.getTable(tableName);
-
-    setupTableWithSplitkeys(tableName, 10, SPLIT_KEYS);
-    Path dir = buildBulkFiles(tableName, 2);
-
-    final AtomicInteger countedLqis = new AtomicInteger();
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()) {
-
-      @Override
-      protected Pair<List<LoadQueueItem>, String> groupOrSplit(
-          Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item,
-          final Table htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-        Pair<List<LoadQueueItem>, String> lqis =
-            super.groupOrSplit(regionGroups, item, htable, startEndKeys);
-        if (lqis != null && lqis.getFirst() != null) {
-          countedLqis.addAndGet(lqis.getFirst().size());
-        }
-        return lqis;
-      }
-    };
-
-    // do bulkload when there is no region hole in hbase:meta.
-    try (Table t = connection.getTable(tableName);
-        RegionLocator locator = connection.getRegionLocator(tableName);
-        Admin admin = connection.getAdmin()) {
-      loader.doBulkLoad(dir, admin, t, locator);
-    } catch (Exception e) {
-      LOG.error("exception=", e);
-    }
-    // check if all the data are loaded into the table.
-    this.assertExpectedTable(tableName, ROWCOUNT, 2);
-
-    dir = buildBulkFiles(tableName, 3);
-
-    // Mess it up by leaving a hole in the hbase:meta
-    List<RegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
-    for (RegionInfo regionInfo : regionInfos) {
-      if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-        MetaTableAccessor.deleteRegion(connection, regionInfo);
-        break;
-      }
-    }
-
-    try (Table t = connection.getTable(tableName);
-        RegionLocator locator = connection.getRegionLocator(tableName);
-        Admin admin = connection.getAdmin()) {
-      loader.doBulkLoad(dir, admin, t, locator);
-    } catch (Exception e) {
-      LOG.error("exception=", e);
-      assertTrue("IOException expected", e instanceof IOException);
-    }
-
-    table.close();
-
-    // Make sure at least the one region that still exists can be found.
-    regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
-    assertTrue(regionInfos.size() >= 1);
-
-    this.assertExpectedTable(connection, tableName, ROWCOUNT, 2);
-    connection.close();
-  }
-
-  /**
-   * Checks that all columns have the expected value and that there is the expected number of rows.
-   * @throws IOException
-   */
-  void assertExpectedTable(final Connection connection, TableName table, int count, int value)
-      throws IOException {
-    TableDescriptor htd = util.getAdmin().getDescriptor(table);
-    assertNotNull(htd);
-    try (Table t = connection.getTable(table); ResultScanner sr = t.getScanner(new Scan())) {
-      int i = 0;
-      for (Result r; (r = sr.next()) != null;) {
-        r.getNoVersionMap().values().stream().flatMap(m -> m.values().stream())
-            .forEach(v -> assertArrayEquals(value(value), v));
-        i++;
-      }
-      assertEquals(count, i);
-    } catch (IOException e) {
-      fail("Failed due to exception");
-    }
-  }
-}
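
The removed testBulkLoadPhaseFailure above injects its failure by stubbing the low-level
ClientService so that every bulkLoadHFile RPC throws (see getMockedConnection). A condensed,
standalone sketch of that Mockito pattern follows; the class name is invented and the imports
assume the shaded protobuf packages this module normally uses.

    import java.io.IOException;

    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
    import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
    import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
    import org.mockito.Mockito;

    public class FailingBulkLoadStub {

      /**
       * Builds a ClientService stub whose bulkLoadHFile call always fails. A test can wire
       * this stub into a mocked connection (as getMockedConnection did) to exercise the
       * client-side retry/error handling path without touching a real region server.
       */
      static ClientProtos.ClientService.BlockingInterface create() throws ServiceException {
        ClientProtos.ClientService.BlockingInterface client =
          Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
        Mockito
          .when(client.bulkLoadHFile((RpcController) Mockito.any(),
            (BulkLoadHFileRequest) Mockito.any()))
          .thenThrow(new ServiceException(new IOException("injecting bulk load error")));
        return client;
      }
    }
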
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFiles.java
similarity index 88%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFiles.java
index e09b9ac..05785b4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFiles.java
@@ -31,20 +31,20 @@ import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 
 /**
- * Reruns TestLoadIncrementalHFiles using LoadIncrementalHFiles in secure mode. This suite is unable
+ * Reruns TestBulkLoadHFiles using BulkLoadHFiles in secure mode. This suite is unable
  * to verify the security handoff/turnover as miniCluster is running as system user thus has root
  * privileges and delegation tokens don't seem to work on miniDFS.
- * <p>
+ * <p/>
  * Thus SecureBulkload can only be completely verified by running integration tests against a secure
  * cluster. This suite is still invaluable as it verifies the other mechanisms that need to be
  * supported as part of a LoadIncrementalFiles call.
  */
 @Category({ MiscTests.class, LargeTests.class })
-public class TestSecureLoadIncrementalHFiles extends TestLoadIncrementalHFiles {
+public class TestSecureBulkLoadHFiles extends TestBulkLoadHFiles {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestSecureLoadIncrementalHFiles.class);
+    HBaseClassTestRule.forClass(TestSecureBulkLoadHFiles.class);
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -53,7 +53,7 @@ public class TestSecureLoadIncrementalHFiles extends TestLoadIncrementalHFiles {
       HadoopSecurityEnabledUserProviderForTesting.class);
     // setup configuration
     SecureTestUtil.enableSecurity(util.getConfiguration());
-    util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
+    util.getConfiguration().setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
       MAX_FILES_PER_REGION_PER_FAMILY);
     // change default behavior so that tag values are returned with normal rpcs
     util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
@@ -66,5 +66,4 @@ public class TestSecureLoadIncrementalHFiles extends TestLoadIncrementalHFiles {
 
     setupNamespace();
   }
-
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFilesSplitRecovery.java
similarity index 90%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFilesSplitRecovery.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFilesSplitRecovery.java
index 03b9380..5943b0d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFilesSplitRecovery.java
@@ -31,21 +31,20 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
- * Reruns TestSecureLoadIncrementalHFilesSplitRecovery using LoadIncrementalHFiles in secure mode.
+ * Reruns TestBulkLoadHFilesSplitRecovery using BulkLoadHFiles in secure mode.
  * This suite is unable to verify the security handoff/turnover as miniCluster is running as system
  * user thus has root privileges and delegation tokens don't seem to work on miniDFS.
- * <p>
+ * <p/>
  * Thus SecureBulkload can only be completely verified by running integration tests against a secure
  * cluster. This suite is still invaluable as it verifies the other mechanisms that need to be
  * supported as part of a LoadIncrementalFiles call.
  */
 @Category({ MiscTests.class, LargeTests.class })
-public class TestSecureLoadIncrementalHFilesSplitRecovery
-    extends TestLoadIncrementalHFilesSplitRecovery {
+public class TestSecureBulkLoadHFilesSplitRecovery extends TestBulkLoadHFilesSplitRecovery {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestSecureLoadIncrementalHFilesSplitRecovery.class);
+    HBaseClassTestRule.forClass(TestSecureBulkLoadHFilesSplitRecovery.class);
 
   // This "overrides" the parent static method
   // make sure they are in sync


[hbase] 02/27: HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 2f406e0a5c7b0c82f4d70413a7a2e3418dd654a4
Author: zhangduo <zh...@apache.org>
AuthorDate: Sat Dec 1 21:15:48 2018 +0800

    HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager
---
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |  5 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  7 ++-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  2 +-
 .../hbase/regionserver/SecureBulkLoadManager.java  | 24 ++++-----
 .../hadoop/hbase/security/token/TokenUtil.java     | 57 +++++++++++++++++-----
 .../hadoop/hbase/security/token/TestTokenUtil.java | 42 ++++++++++++----
 6 files changed, 96 insertions(+), 41 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 78bbc3d..ac0695b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -261,13 +261,12 @@ public final class ProtobufUtil {
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because trying to
    * contain direct protobuf references.
-   * @param e
    */
-  public static IOException handleRemoteException(Exception e) {
+  public static IOException handleRemoteException(Throwable e) {
     return makeIOExceptionOfException(e);
   }
 
-  private static IOException makeIOExceptionOfException(Exception e) {
+  private static IOException makeIOExceptionOfException(Throwable e) {
     Throwable t = e;
     if (e instanceof ServiceException ||
         e instanceof org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 774b144..0072f89 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -41,7 +41,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ByteBufferExtendedCell;
@@ -124,6 +123,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Service;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -345,13 +345,12 @@ public final class ProtobufUtil {
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because trying to
    * contain direct protobuf references.
-   * @param e
    */
-  public static IOException handleRemoteException(Exception e) {
+  public static IOException handleRemoteException(Throwable e) {
     return makeIOExceptionOfException(e);
   }
 
-  private static IOException makeIOExceptionOfException(Exception e) {
+  private static IOException makeIOExceptionOfException(Throwable e) {
     Throwable t = e;
     if (e instanceof ServiceException) {
       t = e.getCause();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index b0b8b90..9dce52a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1948,7 +1948,7 @@ public class HRegionServer extends HasThread implements
     if (!isStopped() && !isAborted()) {
       initializeThreads();
     }
-    this.secureBulkLoadManager = new SecureBulkLoadManager(this.conf, clusterConnection);
+    this.secureBulkLoadManager = new SecureBulkLoadManager(this.conf, asyncClusterConnection);
     this.secureBulkLoadManager.start();
 
     // Health checker thread.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index 6b55744..d54be75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -27,7 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Consumer;
-
 import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -38,11 +37,12 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
 import org.apache.hadoop.hbase.security.token.TokenUtil;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -56,7 +56,9 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
@@ -111,9 +113,9 @@ public class SecureBulkLoadManager {
 
   private UserProvider userProvider;
   private ConcurrentHashMap<UserGroupInformation, MutableInt> ugiReferenceCounter;
-  private Connection conn;
+  private AsyncConnection conn;
 
-  SecureBulkLoadManager(Configuration conf, Connection conn) {
+  SecureBulkLoadManager(Configuration conf, AsyncConnection conn) {
     this.conf = conf;
     this.conn = conn;
   }
@@ -218,23 +220,23 @@ public class SecureBulkLoadManager {
       familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath()));
     }
 
-    Token userToken = null;
+    Token<AuthenticationTokenIdentifier> userToken = null;
     if (userProvider.isHadoopSecurityEnabled()) {
-      userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), request.getFsToken()
-              .getPassword().toByteArray(), new Text(request.getFsToken().getKind()), new Text(
-              request.getFsToken().getService()));
+      userToken = new Token<>(request.getFsToken().getIdentifier().toByteArray(),
+        request.getFsToken().getPassword().toByteArray(), new Text(request.getFsToken().getKind()),
+        new Text(request.getFsToken().getService()));
     }
     final String bulkToken = request.getBulkToken();
     User user = getActiveUser();
     final UserGroupInformation ugi = user.getUGI();
     if (userProvider.isHadoopSecurityEnabled()) {
       try {
-        Token tok = TokenUtil.obtainToken(conn);
+        Token<AuthenticationTokenIdentifier> tok = TokenUtil.obtainToken(conn).get();
         if (tok != null) {
           boolean b = ugi.addToken(tok);
           LOG.debug("token added " + tok + " for user " + ugi + " return=" + b);
         }
-      } catch (IOException ioe) {
+      } catch (Exception ioe) {
         LOG.warn("unable to add token", ioe);
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
index c54d905..28efb84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,27 +15,29 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.security.token;
 
+import com.google.protobuf.ByteString;
+import com.google.protobuf.ServiceException;
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.PrivilegedExceptionAction;
-
-import com.google.protobuf.ByteString;
-import com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.AsyncTable;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.AuthenticationService;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenResponse;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.Job;
@@ -45,6 +47,8 @@ import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
 /**
  * Utility methods for obtaining authentication tokens.
  */
@@ -64,12 +68,39 @@ public class TokenUtil {
 
   /**
    * Obtain and return an authentication token for the current user.
+   * @param conn The async HBase cluster connection
+   * @return the authentication token instance, wrapped by a {@link CompletableFuture}.
+   */
+  public static CompletableFuture<Token<AuthenticationTokenIdentifier>> obtainToken(
+      AsyncConnection conn) {
+    CompletableFuture<Token<AuthenticationTokenIdentifier>> future = new CompletableFuture<>();
+    if (injectedException != null) {
+      future.completeExceptionally(injectedException);
+      return future;
+    }
+    AsyncTable<?> table = conn.getTable(TableName.META_TABLE_NAME);
+    table.<AuthenticationService.Interface, GetAuthenticationTokenResponse> coprocessorService(
+      AuthenticationProtos.AuthenticationService::newStub,
+      (s, c, r) -> s.getAuthenticationToken(c,
+        AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance(), r),
+      HConstants.EMPTY_START_ROW).whenComplete((resp, error) -> {
+        if (error != null) {
+          future.completeExceptionally(ProtobufUtil.handleRemoteException(error));
+        } else {
+          future.complete(toToken(resp.getToken()));
+        }
+      });
+    return future;
+  }
+
+  /**
+   * Obtain and return an authentication token for the current user.
    * @param conn The HBase cluster connection
    * @throws IOException if a remote error or serialization problem occurs.
    * @return the authentication token instance
    */
-  public static Token<AuthenticationTokenIdentifier> obtainToken(
-      Connection conn) throws IOException {
+  public static Token<AuthenticationTokenIdentifier> obtainToken(Connection conn)
+      throws IOException {
     Table meta = null;
     try {
       injectFault();
@@ -77,9 +108,9 @@ public class TokenUtil {
       meta = conn.getTable(TableName.META_TABLE_NAME);
       CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
       AuthenticationProtos.AuthenticationService.BlockingInterface service =
-          AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
-      AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
-          AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
+        AuthenticationService.newBlockingStub(rpcChannel);
+      GetAuthenticationTokenResponse response =
+        service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance());
 
       return toToken(response.getToken());
     } catch (ServiceException se) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenUtil.java
index 32fcddb..585a3ec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenUtil.java
@@ -18,35 +18,53 @@
 package org.apache.hadoop.hbase.security.token;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.net.URL;
 import java.net.URLClassLoader;
-
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 @Category(SmallTests.class)
 public class TestTokenUtil {
+
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestTokenUtil.class);
+    HBaseClassTestRule.forClass(TestTokenUtil.class);
 
-  @Test
-  public void testObtainToken() throws Exception {
+  private URLClassLoader cl;
+
+  @Before
+  public void setUp() {
     URL urlPU = ProtobufUtil.class.getProtectionDomain().getCodeSource().getLocation();
     URL urlTU = TokenUtil.class.getProtectionDomain().getCodeSource().getLocation();
+    cl = new URLClassLoader(new URL[] { urlPU, urlTU }, getClass().getClassLoader());
+  }
 
-    ClassLoader cl = new URLClassLoader(new URL[] { urlPU, urlTU }, getClass().getClassLoader());
+  @After
+  public void tearDown() throws IOException {
+    Closeables.close(cl, true);
+  }
 
+  @Test
+  public void testObtainToken() throws Exception {
     Throwable injected = new com.google.protobuf.ServiceException("injected");
 
     Class<?> tokenUtil = cl.loadClass(TokenUtil.class.getCanonicalName());
@@ -55,8 +73,7 @@ public class TestTokenUtil {
     shouldInjectFault.set(null, injected);
 
     try {
-      tokenUtil.getMethod("obtainToken", Connection.class)
-          .invoke(null, new Object[] { null });
+      tokenUtil.getMethod("obtainToken", Connection.class).invoke(null, new Object[] { null });
       fail("Should have injected exception.");
     } catch (InvocationTargetException e) {
       Throwable t = e;
@@ -72,9 +89,16 @@ public class TestTokenUtil {
       }
     }
 
+    CompletableFuture<?> future = (CompletableFuture<?>) tokenUtil
+      .getMethod("obtainToken", AsyncConnection.class).invoke(null, new Object[] { null });
+    try {
+      future.get();
+      fail("Should have injected exception.");
+    } catch (ExecutionException e) {
+      assertSame(injected, e.getCause());
+    }
     Boolean loaded = (Boolean) cl.loadClass(ProtobufUtil.class.getCanonicalName())
-        .getDeclaredMethod("isClassLoaderLoaded")
-        .invoke(null);
+      .getDeclaredMethod("isClassLoaderLoaded").invoke(null);
     assertFalse("Should not have loaded DynamicClassLoader", loaded);
   }
 }
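
The AsyncConnection overload of TokenUtil.obtainToken added above returns a CompletableFuture
instead of blocking. A minimal sketch of how a caller might consume it, modelled on the
SecureBulkLoadManager change in this commit (the surrounding class and method names are made up):

    import java.util.concurrent.CompletableFuture;

    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
    import org.apache.hadoop.hbase.security.token.TokenUtil;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class ObtainTokenExample {

      /**
       * Fetches an authentication token over the given async connection and attaches it to
       * the supplied UGI, as SecureBulkLoadManager now does. Blocking with get() keeps the
       * sketch short; a fully asynchronous caller would chain on the returned future instead.
       */
      static void addAuthToken(AsyncConnection conn, UserGroupInformation ugi) {
        CompletableFuture<Token<AuthenticationTokenIdentifier>> future = TokenUtil.obtainToken(conn);
        try {
          Token<AuthenticationTokenIdentifier> token = future.get();
          if (token != null) {
            ugi.addToken(token);
          }
        } catch (Exception e) {
          // Mirrors the commit: failing to obtain a token is logged/ignored, not fatal.
        }
      }
    }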


[hbase] 20/27: HBASE-22297 Fix TestRegionMergeTransitionOnCluster and TestSplitTransactionOnCluster

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d43461a6d580067b5bfd286d459179fe4ba7cf35
Author: zhangduo <zh...@apache.org>
AuthorDate: Tue Apr 23 22:22:39 2019 +0800

    HBASE-22297 Fix TestRegionMergeTransitionOnCluster and TestSplitTransactionOnCluster
    
    Signed-off-by: Michael Stack <st...@apache.org>
---
 .../TestRegionMergeTransactionOnCluster.java          | 19 +++++++++----------
 .../regionserver/TestSplitTransactionOnCluster.java   |  3 ++-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index ea93468..7b54ffb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -26,7 +26,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -63,6 +62,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
@@ -312,7 +312,6 @@ public class TestRegionMergeTransactionOnCluster {
     LOG.info("Starting " + name.getMethodName());
     final TableName tableName = TableName.valueOf(name.getMethodName());
     final Admin admin = TEST_UTIL.getAdmin();
-    final int syncWaitTimeout = 10 * 60000; // 10min
 
     try {
       // Create table and load data.
@@ -326,8 +325,8 @@ public class TestRegionMergeTransactionOnCluster {
       am.offlineRegion(b);
       try {
         // Merge offline region. Region a is offline here
-        admin.mergeRegionsAsync(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false)
-                .get(syncWaitTimeout, TimeUnit.MILLISECONDS);
+        FutureUtils.get(
+          admin.mergeRegionsAsync(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false));
         fail("Offline regions should not be able to merge");
       } catch (DoNotRetryRegionException ie) {
         System.out.println(ie);
@@ -336,21 +335,21 @@ public class TestRegionMergeTransactionOnCluster {
 
       try {
         // Merge the same region: b and b.
-        admin.mergeRegionsAsync(b.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), true);
+        FutureUtils
+          .get(admin.mergeRegionsAsync(b.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), true));
         fail("A region should not be able to merge with itself, even forcefully");
       } catch (IOException ie) {
         assertTrue("Exception should mention regions not online",
-          StringUtils.stringifyException(ie).contains("region to itself")
-            && ie instanceof MergeRegionException);
+          StringUtils.stringifyException(ie).contains("region to itself") &&
+            ie instanceof MergeRegionException);
       }
 
       try {
         // Merge unknown regions
-        admin.mergeRegionsAsync(Bytes.toBytes("-f1"), Bytes.toBytes("-f2"), true);
+        FutureUtils.get(admin.mergeRegionsAsync(Bytes.toBytes("-f1"), Bytes.toBytes("-f2"), true));
         fail("Unknown region could not be merged");
       } catch (IOException ie) {
-        assertTrue("UnknownRegionException should be thrown",
-          ie instanceof UnknownRegionException);
+        assertTrue("UnknownRegionException should be thrown", ie instanceof UnknownRegionException);
       }
       table.close();
     } finally {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index f251df5..fe74572 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -90,6 +90,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
@@ -328,7 +329,7 @@ public class TestSplitTransactionOnCluster {
       // We don't roll back here anymore. Instead we fail-fast on construction of the
       // split transaction. Catch the exception instead.
       try {
-        this.admin.splitRegionAsync(hri.getRegionName());
+        FutureUtils.get(this.admin.splitRegionAsync(hri.getRegionName()));
         fail();
       } catch (DoNotRetryRegionException e) {
         // Expected
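
The test changes above replace bare Future.get(timeout) calls with FutureUtils.get, which
unwraps the cause of an ExecutionException into an IOException so the existing assertions on
exception types keep working. A small sketch of the same pattern around an async split request
(the helper and its error handling are illustrative, not code from the commit):

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.util.FutureUtils;

    public class SplitWithFutureUtils {

      /**
       * Requests a region split and reports whether the master accepted it. FutureUtils.get
       * turns an asynchronous failure into a synchronous IOException, so callers can keep
       * catching the usual exception types (e.g. DoNotRetryRegionException).
       */
      static boolean trySplit(Admin admin, byte[] regionName) {
        try {
          FutureUtils.get(admin.splitRegionAsync(regionName));
          return true;
        } catch (IOException e) {
          // The split was rejected or failed; the original cause is preserved by FutureUtils.
          return false;
        }
      }
    }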


[hbase] 18/27: HBASE-22281 Fix failed shell UTs

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9de0cd8474db4de594e2fa3bea5f3af38a31e32a
Author: zhangduo <zh...@apache.org>
AuthorDate: Mon Apr 22 22:00:43 2019 +0800

    HBASE-22281 Fix failed shell UTs
---
 hbase-shell/src/main/ruby/hbase/admin.rb | 44 ++++++++++++++++++++++++--------
 hbase-shell/src/main/ruby/hbase/hbase.rb | 23 +++++++++++------
 hbase-shell/src/main/ruby/hbase/table.rb |  2 +-
 3 files changed, 50 insertions(+), 19 deletions(-)

diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 4187a0e..98b5392 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -54,7 +54,7 @@ module Hbase
     # Requests a table or region or region server flush
     def flush(name)
       @admin.flushRegion(name.to_java_bytes)
-    rescue java.lang.IllegalArgumentException
+    rescue java.lang.IllegalArgumentException, org.apache.hadoop.hbase.UnknownRegionException
       # Unknown region. Try table.
       begin
         @admin.flush(TableName.valueOf(name))
@@ -79,9 +79,17 @@ module Hbase
       end
 
       begin
-        @admin.compactRegion(table_or_region_name.to_java_bytes, family_bytes)
-      rescue java.lang.IllegalArgumentException => e
-        @admin.compact(TableName.valueOf(table_or_region_name), family_bytes, compact_type)
+        if family_bytes.nil?
+          @admin.compactRegion(table_or_region_name.to_java_bytes)
+        else
+          @admin.compactRegion(table_or_region_name.to_java_bytes, family_bytes)
+        end
+      rescue java.lang.IllegalArgumentException, org.apache.hadoop.hbase.UnknownRegionException
+        if family_bytes.nil?
+          @admin.compact(TableName.valueOf(table_or_region_name), compact_type)
+        else
+          @admin.compact(TableName.valueOf(table_or_region_name), family_bytes, compact_type)
+        end
       end
     end
 
@@ -124,9 +132,17 @@ module Hbase
       end
 
       begin
-        @admin.majorCompactRegion(table_or_region_name.to_java_bytes, family_bytes)
-      rescue java.lang.IllegalArgumentException => e
-        @admin.majorCompact(TableName.valueOf(table_or_region_name), family_bytes, compact_type)
+        if family_bytes.nil?
+          @admin.majorCompactRegion(table_or_region_name.to_java_bytes)
+        else
+          @admin.majorCompactRegion(table_or_region_name.to_java_bytes, family_bytes)
+        end
+      rescue java.lang.IllegalArgumentException, org.apache.hadoop.hbase.UnknownRegionException
+        if family_bytes.nil?
+          @admin.majorCompact(TableName.valueOf(table_or_region_name), compact_type)
+        else
+          @admin.majorCompact(TableName.valueOf(table_or_region_name), family_bytes, compact_type)
+        end
       end
     end
 
@@ -144,9 +160,17 @@ module Hbase
       split_point_bytes = nil
       split_point_bytes = split_point.to_java_bytes unless split_point.nil?
       begin
-        @admin.splitRegionAsync(table_or_region_name.to_java_bytes, split_point_bytes).get
-      rescue java.lang.IllegalArgumentException, org.apache.hadoop.hbase.UnknownRegionException => e
-        @admin.split(TableName.valueOf(table_or_region_name), split_point_bytes)
+        if split_point_bytes.nil?
+          org.apache.hadoop.hbase.util.FutureUtils.get(@admin.splitRegionAsync(table_or_region_name.to_java_bytes))
+        else
+          org.apache.hadoop.hbase.util.FutureUtils.get(@admin.splitRegionAsync(table_or_region_name.to_java_bytes, split_point_bytes))
+        end
+      rescue java.lang.IllegalArgumentException, org.apache.hadoop.hbase.UnknownRegionException
+        if split_point_bytes.nil?
+          @admin.split(TableName.valueOf(table_or_region_name))
+        else
+          @admin.split(TableName.valueOf(table_or_region_name), split_point_bytes)
+        end
       end
     end
 
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index 1f37f99..a9b35ed 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -42,16 +42,21 @@ module Hbase
         configuration.setInt('hbase.client.retries.number', 7)
         configuration.setInt('hbase.ipc.client.connect.max.retries', 3)
       end
-      @connection = ConnectionFactory.createConnection(configuration)
     end
 
+    def connection
+      if @connection.nil?
+        @connection = ConnectionFactory.createConnection(configuration)
+      end
+      @connection
+    end
     # Returns ruby's Admin class from admin.rb
     def admin
-      ::Hbase::Admin.new(@connection)
+      ::Hbase::Admin.new(self.connection)
     end
 
     def rsgroup_admin
-      ::Hbase::RSGroupAdmin.new(@connection)
+      ::Hbase::RSGroupAdmin.new(self.connection)
     end
 
     def taskmonitor
@@ -60,7 +65,7 @@ module Hbase
 
     # Create new one each time
     def table(table, shell)
-      ::Hbase::Table.new(@connection.getTable(TableName.valueOf(table)), shell)
+      ::Hbase::Table.new(self.connection.getTable(TableName.valueOf(table)), shell)
     end
 
     def replication_admin
@@ -68,19 +73,21 @@ module Hbase
     end
 
     def security_admin
-      ::Hbase::SecurityAdmin.new(@connection.getAdmin)
+      ::Hbase::SecurityAdmin.new(self.connection.getAdmin)
     end
 
     def visibility_labels_admin
-      ::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin)
+      ::Hbase::VisibilityLabelsAdmin.new(self.connection.getAdmin)
     end
 
     def quotas_admin
-      ::Hbase::QuotasAdmin.new(@connection.getAdmin)
+      ::Hbase::QuotasAdmin.new(self.connection.getAdmin)
     end
 
     def shutdown
-      @connection.close
+      if @connection != nil
+        @connection.close
+      end
     end
   end
 end
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb
index 8c7144d..53b090e 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -717,7 +717,7 @@ EOF
 
     # Returns a list of column names in the table
     def get_all_columns
-      @table.table_descriptor.getFamilies.map do |family|
+      @table.descriptor.getColumnFamilies.map do |family|
         "#{family.getNameAsString}:"
       end
     end


[hbase] 24/27: HBASE-22328 NPE in RegionReplicaReplicationEndpoint

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 3083d5626fcece8b1e82efbaf08815ae5777ce46
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Apr 30 16:33:58 2019 +0800

    HBASE-22328 NPE in RegionReplicaReplicationEndpoint
---
 .../regionserver/RegionReplicaReplicationEndpoint.java     | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index 65cf9a8..cc2650f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -151,21 +151,23 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
   private void getRegionLocations(CompletableFuture<RegionLocations> future,
       TableDescriptor tableDesc, byte[] encodedRegionName, byte[] row, boolean reload) {
     FutureUtils.addListener(connection.getRegionLocations(tableDesc.getTableName(), row, reload),
-      (r, e) -> {
+      (locs, e) -> {
         if (e != null) {
           future.completeExceptionally(e);
           return;
         }
         // if we are not loading from cache, just return
         if (reload) {
-          future.complete(r);
+          future.complete(locs);
           return;
         }
         // check if the number of region replicas is correct, and also the primary region name
-        // matches
-        if (r.size() == tableDesc.getRegionReplication() && Bytes.equals(
-          r.getDefaultRegionLocation().getRegion().getEncodedNameAsBytes(), encodedRegionName)) {
-          future.complete(r);
+        // matches, and also there are no null elements in the returned RegionLocations
+        if (locs.size() == tableDesc.getRegionReplication() &&
+          locs.size() == locs.numNonNullElements() &&
+          Bytes.equals(locs.getDefaultRegionLocation().getRegion().getEncodedNameAsBytes(),
+            encodedRegionName)) {
+          future.complete(locs);
         } else {
          // reload again as the information in cache may be stale
           getRegionLocations(future, tableDesc, encodedRegionName, row, true);
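
The guard added above only trusts a cached RegionLocations if it has the expected replica
count, contains no null slots, and still names the expected primary region; otherwise the
lookup is retried with reload=true. The same validity check in isolation, as a small sketch
(the helper name is invented; the RegionLocations accessors are the ones used in the diff):

    import org.apache.hadoop.hbase.RegionLocations;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ReplicaLocationChecks {

      private ReplicaLocationChecks() {
      }

      /**
       * Returns true only if the cached locations cover every replica of the region (no null
       * entries) and still describe the expected primary region. When this returns false the
       * caller should re-fetch the locations with reload=true instead of using the cache.
       */
      static boolean isUsable(RegionLocations locs, int regionReplication,
          byte[] encodedRegionName) {
        return locs.size() == regionReplication &&
          locs.size() == locs.numNonNullElements() &&
          Bytes.equals(locs.getDefaultRegionLocation().getRegion().getEncodedNameAsBytes(),
            encodedRegionName);
      }
    }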


[hbase] 21/27: HBASE-22302 Fix TestHbck

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 82a1ea9857a664c009106829470c93eb8800b46f
Author: zhangduo <zh...@apache.org>
AuthorDate: Wed Apr 24 22:30:02 2019 +0800

    HBASE-22302 Fix TestHbck
---
 .../src/test/java/org/apache/hadoop/hbase/client/TestHbck.java        | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
index d9a7ca9..ee277d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
@@ -29,7 +29,6 @@ import java.util.Optional;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -273,7 +272,8 @@ public class TestHbck {
     FailingSplitAfterMetaUpdatedMasterObserver observer = master.getMasterCoprocessorHost()
         .findCoprocessor(FailingSplitAfterMetaUpdatedMasterObserver.class);
     assertNotNull(observer);
-    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
+    try {
+      AsyncAdmin admin = TEST_UTIL.getAsyncConnection().getAdmin();
       byte[] splitKey = Bytes.toBytes("bcd");
       admin.split(TableName.valueOf(testTable), splitKey);
       observer.latch.await(5000, TimeUnit.MILLISECONDS);
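
The TestHbck fix above swaps the blocking Admin for AsyncAdmin, whose split(...) returns a
CompletableFuture, so the test no longer hangs when the injected coprocessor fails the split.
A minimal sketch of driving a split through the async client (connection handling is
simplified and the table name and split key are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AsyncSplitExample {

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // createAsyncConnection returns a CompletableFuture; get() keeps the sketch short.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          AsyncAdmin admin = conn.getAdmin();
          // Request a split at an explicit split point and wait for the future to complete.
          admin.split(TableName.valueOf("example_table"), Bytes.toBytes("bcd")).join();
        }
      }
    }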


[hbase] 04/27: HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit e5b82e2b234139f87616011f4c75905b31b079b5
Author: zhangduo <zh...@apache.org>
AuthorDate: Tue Jan 1 21:27:14 2019 +0800

    HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint
---
 .../hbase/client/AsyncRegionServerAdmin.java       | 14 +++++---
 .../hbase/protobuf/ReplicationProtbufUtil.java     | 35 ++++++++++---------
 .../HBaseInterClusterReplicationEndpoint.java      | 31 +++++++++--------
 .../regionserver/ReplicationSinkManager.java       | 40 ++++++++--------------
 .../hbase/replication/SyncReplicationTestBase.java | 12 +++----
 .../regionserver/TestReplicationSinkManager.java   | 21 +++++-------
 6 files changed, 74 insertions(+), 79 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
index 9accd89..b9141a9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -94,9 +95,9 @@ public class AsyncRegionServerAdmin {
     void call(AdminService.Interface stub, HBaseRpcController controller, RpcCallback<RESP> done);
   }
 
-  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
+  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall, CellScanner cellScanner) {
     CompletableFuture<RESP> future = new CompletableFuture<>();
-    HBaseRpcController controller = conn.rpcControllerFactory.newController();
+    HBaseRpcController controller = conn.rpcControllerFactory.newController(cellScanner);
     try {
       rpcCall.call(conn.getAdminStub(server), controller, new RpcCallback<RESP>() {
 
@@ -115,6 +116,10 @@ public class AsyncRegionServerAdmin {
     return future;
   }
 
+  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
+    return call(rpcCall, null);
+  }
+
   public CompletableFuture<GetRegionInfoResponse> getRegionInfo(GetRegionInfoRequest request) {
     return call((stub, controller, done) -> stub.getRegionInfo(controller, request, done));
   }
@@ -154,8 +159,9 @@ public class AsyncRegionServerAdmin {
   }
 
   public CompletableFuture<ReplicateWALEntryResponse> replicateWALEntry(
-      ReplicateWALEntryRequest request) {
-    return call((stub, controller, done) -> stub.replicateWALEntry(controller, request, done));
+      ReplicateWALEntryRequest request, CellScanner cellScanner) {
+    return call((stub, controller, done) -> stub.replicateWALEntry(controller, request, done),
+      cellScanner);
   }
 
   public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index c1b3911..74fad26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -20,51 +20,54 @@ package org.apache.hadoop.hbase.protobuf;
 
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.HBaseRpcControllerImpl;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 
 @InterfaceAudience.Private
 public class ReplicationProtbufUtil {
+
   /**
-   * A helper to replicate a list of WAL entries using admin protocol.
-   * @param admin Admin service
+   * A helper to replicate a list of WAL entries using region server admin
+   * @param admin the region server admin
    * @param entries Array of WAL entries to be replicated
    * @param replicationClusterId Id which will uniquely identify source cluster FS client
    *          configurations in the replication configuration directory
    * @param sourceBaseNamespaceDir Path to source cluster base namespace directory
    * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
-   * @throws java.io.IOException
    */
-  public static void replicateWALEntry(final AdminService.BlockingInterface admin,
-      final Entry[] entries, String replicationClusterId, Path sourceBaseNamespaceDir,
-      Path sourceHFileArchiveDir) throws IOException {
-    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
-        buildReplicateWALEntryRequest(entries, null, replicationClusterId, sourceBaseNamespaceDir,
-          sourceHFileArchiveDir);
-    HBaseRpcController controller = new HBaseRpcControllerImpl(p.getSecond());
+  public static void replicateWALEntry(AsyncRegionServerAdmin admin, Entry[] entries,
+      String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir)
+      throws IOException {
+    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(
+      entries, null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
     try {
-      admin.replicateWALEntry(controller, p.getFirst());
-    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {
-      throw ProtobufUtil.getServiceException(e);
+      admin.replicateWALEntry(p.getFirst(), p.getSecond()).get();
+    } catch (InterruptedException e) {
+      throw (IOException) new InterruptedIOException().initCause(e);
+    } catch (ExecutionException e) {
+      Throwable cause = e.getCause();
+      Throwables.propagateIfPossible(cause, IOException.class);
+      throw new IOException(e);
     }
   }
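
The rewritten replicateWALEntry above now blocks on the CompletableFuture returned by the async admin and translates the two failure modes back into IOException for its synchronous callers. A self-contained sketch of that unwrapping pattern using only JDK classes (the Guava Throwables helper from the patch is approximated with an instanceof check; the class name here is illustrative):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    public final class BlockOnFutureSketch {
      // Wait for an async result and surface failures as IOException, as the patch does.
      static <T> T get(CompletableFuture<T> future) throws IOException {
        try {
          return future.get();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (IOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          if (cause instanceof IOException) {
            throw (IOException) cause; // like Throwables.propagateIfPossible(cause, IOException.class)
          }
          throw new IOException(cause);
        }
      }

      public static void main(String[] args) throws IOException {
        System.out.println(get(CompletableFuture.completedFuture("replicated")));
      }
    }
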
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 57301fc..81ed4b8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -39,7 +39,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -48,13 +47,16 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
@@ -65,8 +67,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
 /**
  * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint}
  * implementation for replicating to another HBase cluster.
@@ -85,8 +85,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
 
   private static final long DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER = 2;
 
-  private ClusterConnection conn;
-  private Configuration localConf;
+  private AsyncClusterConnection conn;
   private Configuration conf;
   // How long should we sleep for each retry
   private long sleepForRetries;
@@ -117,7 +116,6 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
   public void init(Context context) throws IOException {
     super.init(context);
     this.conf = HBaseConfiguration.create(ctx.getConfiguration());
-    this.localConf = HBaseConfiguration.create(ctx.getLocalConfiguration());
     decorateConf();
     this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
     this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
@@ -132,12 +130,13 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
     // TODO: This connection is replication specific or we should make it particular to
     // replication and make replication specific settings such as compression or codec to use
     // passing Cells.
-    this.conn = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
+    this.conn =
+      ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent());
     this.sleepForRetries =
         this.conf.getLong("replication.source.sleepforretries", 1000);
     this.metrics = context.getMetrics();
     // ReplicationQueueInfo parses the peerId out of the znode for us
-    this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
+    this.replicationSinkMgr = new ReplicationSinkManager(conn, this, this.conf);
     // per sink thread pool
     this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
       HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
@@ -284,9 +283,10 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
   }
 
   private void reconnectToPeerCluster() {
-    ClusterConnection connection = null;
+    AsyncClusterConnection connection = null;
     try {
-      connection = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
+      connection =
+        ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent());
     } catch (IOException ioe) {
       LOG.warn("Failed to create connection for peer cluster", ioe);
     }
@@ -366,7 +366,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
         }
         continue;
       }
-      if (this.conn == null || this.conn.isClosed()) {
+      if (this.conn == null) {
         reconnectToPeerCluster();
       }
       try {
@@ -470,10 +470,11 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
           entriesHashCode, entries.size(), size, replicationClusterId);
       }
       sinkPeer = replicationSinkMgr.getReplicationSink();
-      BlockingInterface rrs = sinkPeer.getRegionServer();
+      AsyncRegionServerAdmin rsAdmin = sinkPeer.getRegionServer();
       try {
-        ReplicationProtbufUtil.replicateWALEntry(rrs, entries.toArray(new Entry[entries.size()]),
-          replicationClusterId, baseNamespaceDir, hfileArchiveDir);
+        ReplicationProtbufUtil.replicateWALEntry(rsAdmin,
+          entries.toArray(new Entry[entries.size()]), replicationClusterId, baseNamespaceDir,
+          hfileArchiveDir);
         LOG.trace("Completed replicating batch {}", entriesHashCode);
       } catch (IOException e) {
         LOG.trace("Failed replicating batch {}", entriesHashCode, e);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
index 3cd7884..21b07ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
@@ -21,11 +21,11 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -35,8 +35,6 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-
 /**
  * Maintains a collection of peers to replicate to, and randomly selects a
  * single peer to replicate to per set of data to replicate. Also handles
@@ -61,9 +59,7 @@ public class ReplicationSinkManager {
   static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f;
 
 
-  private final Connection conn;
-
-  private final String peerClusterId;
+  private final AsyncClusterConnection conn;
 
   private final HBaseReplicationEndpoint endpoint;
 
@@ -77,8 +73,6 @@ public class ReplicationSinkManager {
   // replication sinks is refreshed
   private final int badSinkThreshold;
 
-  private final Random random;
-
   // A timestamp of the last time the list of replication peers changed
   private long lastUpdateToPeers;
 
@@ -88,26 +82,22 @@ public class ReplicationSinkManager {
   /**
    * Instantiate for a single replication peer cluster.
    * @param conn connection to the peer cluster
-   * @param peerClusterId identifier of the peer cluster
    * @param endpoint replication endpoint for inter cluster replication
    * @param conf HBase configuration, used for determining replication source ratio and bad peer
    *          threshold
    */
-  public ReplicationSinkManager(ClusterConnection conn, String peerClusterId,
-      HBaseReplicationEndpoint endpoint, Configuration conf) {
+  public ReplicationSinkManager(AsyncClusterConnection conn, HBaseReplicationEndpoint endpoint,
+      Configuration conf) {
     this.conn = conn;
-    this.peerClusterId = peerClusterId;
     this.endpoint = endpoint;
     this.badReportCounts = Maps.newHashMap();
     this.ratio = conf.getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO);
-    this.badSinkThreshold = conf.getInt("replication.bad.sink.threshold",
-                                        DEFAULT_BAD_SINK_THRESHOLD);
-    this.random = new Random();
+    this.badSinkThreshold =
+      conf.getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD);
   }
 
   /**
    * Get a randomly-chosen replication sink to replicate to.
-   *
    * @return a replication sink to replicate to
    */
   public synchronized SinkPeer getReplicationSink() throws IOException {
@@ -119,8 +109,8 @@ public class ReplicationSinkManager {
     if (sinks.isEmpty()) {
       throw new IOException("No replication sinks are available");
     }
-    ServerName serverName = sinks.get(random.nextInt(sinks.size()));
-    return new SinkPeer(serverName, ((ClusterConnection) conn).getAdmin(serverName));
+    ServerName serverName = sinks.get(ThreadLocalRandom.current().nextInt(sinks.size()));
+    return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName));
   }
 
   /**
@@ -160,7 +150,7 @@ public class ReplicationSinkManager {
    */
   public synchronized void chooseSinks() {
     List<ServerName> slaveAddresses = endpoint.getRegionServers();
-    Collections.shuffle(slaveAddresses, random);
+    Collections.shuffle(slaveAddresses, ThreadLocalRandom.current());
     int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio);
     sinks = slaveAddresses.subList(0, numSinks);
     lastUpdateToPeers = System.currentTimeMillis();
@@ -182,9 +172,9 @@ public class ReplicationSinkManager {
    */
   public static class SinkPeer {
     private ServerName serverName;
-    private AdminService.BlockingInterface regionServer;
+    private AsyncRegionServerAdmin regionServer;
 
-    public SinkPeer(ServerName serverName, AdminService.BlockingInterface regionServer) {
+    public SinkPeer(ServerName serverName, AsyncRegionServerAdmin regionServer) {
       this.serverName = serverName;
       this.regionServer = regionServer;
     }
@@ -193,10 +183,8 @@ public class ReplicationSinkManager {
       return serverName;
     }
 
-    public AdminService.BlockingInterface getRegionServer() {
+    public AsyncRegionServerAdmin getRegionServer() {
       return regionServer;
     }
-
   }
-
 }
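
ReplicationSinkManager above now draws its randomness from ThreadLocalRandom instead of a stored Random: chooseSinks() shuffles the peer's region servers and keeps a configurable ratio of them, and getReplicationSink() then picks one of those candidates at random per batch. A compact sketch of that selection logic, with plain strings standing in for ServerName and 0.5f mirroring DEFAULT_REPLICATION_SOURCE_RATIO:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.ThreadLocalRandom;

    public class SinkSelectionSketch {
      // Shuffle the peer's servers and keep ceil(size * ratio) of them as candidate sinks.
      static List<String> chooseSinks(List<String> peerServers, float ratio) {
        List<String> shuffled = new ArrayList<>(peerServers);
        Collections.shuffle(shuffled, ThreadLocalRandom.current());
        int numSinks = (int) Math.ceil(shuffled.size() * ratio);
        return shuffled.subList(0, numSinks);
      }

      // Pick one candidate sink at random for the next batch of edits.
      static String pickSink(List<String> sinks) {
        return sinks.get(ThreadLocalRandom.current().nextInt(sinks.size()));
      }

      public static void main(String[] args) {
        List<String> sinks = chooseSinks(List.of("rs1", "rs2", "rs3", "rs4"), 0.5f);
        System.out.println("sinks=" + sinks + " picked=" + pickSink(sinks));
      }
    }
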
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
index f373590..e0d112d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -250,19 +250,19 @@ public class SyncReplicationTestBase {
   protected final void verifyReplicationRequestRejection(HBaseTestingUtility utility,
       boolean expectedRejection) throws Exception {
     HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
-    ClusterConnection connection = regionServer.getClusterConnection();
+    AsyncClusterConnection connection = regionServer.getAsyncClusterConnection();
     Entry[] entries = new Entry[10];
     for (int i = 0; i < entries.length; i++) {
       entries[i] =
         new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit());
     }
     if (!expectedRejection) {
-      ReplicationProtbufUtil.replicateWALEntry(connection.getAdmin(regionServer.getServerName()),
-        entries, null, null, null);
+      ReplicationProtbufUtil.replicateWALEntry(
+        connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null);
     } else {
       try {
-        ReplicationProtbufUtil.replicateWALEntry(connection.getAdmin(regionServer.getServerName()),
-          entries, null, null, null);
+        ReplicationProtbufUtil.replicateWALEntry(
+          connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null);
         fail("Should throw IOException when sync-replication state is in A or DA");
       } catch (DoNotRetryIOException e) {
         assertTrue(e.getMessage().contains("Reject to apply to sink cluster"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
index 39dabb4..60afd40 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
@@ -25,7 +25,8 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
@@ -37,8 +38,6 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-
 @Category({ReplicationTests.class, SmallTests.class})
 public class TestReplicationSinkManager {
 
@@ -46,16 +45,14 @@ public class TestReplicationSinkManager {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestReplicationSinkManager.class);
 
-  private static final String PEER_CLUSTER_ID = "PEER_CLUSTER_ID";
-
   private HBaseReplicationEndpoint replicationEndpoint;
   private ReplicationSinkManager sinkManager;
 
   @Before
   public void setUp() {
     replicationEndpoint = mock(HBaseReplicationEndpoint.class);
-    sinkManager = new ReplicationSinkManager(mock(ClusterConnection.class),
-                      PEER_CLUSTER_ID, replicationEndpoint, new Configuration());
+    sinkManager = new ReplicationSinkManager(mock(AsyncClusterConnection.class),
+      replicationEndpoint, new Configuration());
   }
 
   @Test
@@ -100,7 +97,7 @@ public class TestReplicationSinkManager {
     // Sanity check
     assertEquals(1, sinkManager.getNumSinks());
 
-    SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AdminService.BlockingInterface.class));
+    SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class));
 
     sinkManager.reportBadSink(sinkPeer);
 
@@ -131,7 +128,7 @@ public class TestReplicationSinkManager {
 
     ServerName serverName = sinkManager.getSinksForTesting().get(0);
 
-    SinkPeer sinkPeer = new SinkPeer(serverName, mock(AdminService.BlockingInterface.class));
+    SinkPeer sinkPeer = new SinkPeer(serverName, mock(AsyncRegionServerAdmin.class));
 
     sinkManager.reportSinkSuccess(sinkPeer); // has no effect, counter does not go negative
     for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) {
@@ -147,7 +144,7 @@ public class TestReplicationSinkManager {
     //
     serverName = sinkManager.getSinksForTesting().get(0);
 
-    sinkPeer = new SinkPeer(serverName, mock(AdminService.BlockingInterface.class));
+    sinkPeer = new SinkPeer(serverName, mock(AsyncRegionServerAdmin.class));
     for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD-1; i++) {
       sinkManager.reportBadSink(sinkPeer);
     }
@@ -188,8 +185,8 @@ public class TestReplicationSinkManager {
     ServerName serverNameA = sinkList.get(0);
     ServerName serverNameB = sinkList.get(1);
 
-    SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AdminService.BlockingInterface.class));
-    SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AdminService.BlockingInterface.class));
+    SinkPeer sinkPeerA = new SinkPeer(serverNameA, mock(AsyncRegionServerAdmin.class));
+    SinkPeer sinkPeerB = new SinkPeer(serverNameB, mock(AsyncRegionServerAdmin.class));
 
     for (int i = 0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) {
       sinkManager.reportBadSink(sinkPeerA);


[hbase] 06/27: HBASE-21671 Rewrite RegionReplicaReplicationEndpoint to use AsyncClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 0e29c9df67a81d96187cf398839911ff28797c05
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Fri Jan 11 16:22:24 2019 +0800

    HBASE-21671 Rewrite RegionReplicaReplicationEndpoint to use AsyncClusterConnection
---
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |  25 +-
 .../hbase/client/AsyncClusterConnection.java       |  17 +
 .../hbase/client/AsyncClusterConnectionImpl.java   |  80 +++
 .../AsyncRegionReplicaReplayRetryingCaller.java    | 147 ++++
 .../hbase/client/AsyncRegionServerAdmin.java       |   5 +-
 .../hbase/client/ClusterConnectionFactory.java     |   2 +-
 .../hbase/protobuf/ReplicationProtbufUtil.java     |  31 +-
 .../handler/RegionReplicaFlushHandler.java         |   3 +-
 .../hbase/replication/ReplicationEndpoint.java     |  35 +-
 .../RegionReplicaReplicationEndpoint.java          | 782 +++++++--------------
 .../regionserver/ReplicationSource.java            |   2 +-
 .../TestRegionReplicaReplicationEndpoint.java      |  56 +-
 ...stRegionReplicaReplicationEndpointNoMaster.java |  98 ++-
 13 files changed, 627 insertions(+), 656 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index c17cca9..d3d50d7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -60,7 +60,6 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
@@ -69,7 +68,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterServ
  * The implementation of AsyncConnection.
  */
 @InterfaceAudience.Private
-class AsyncConnectionImpl implements AsyncClusterConnection {
+class AsyncConnectionImpl implements AsyncConnection {
 
   private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionImpl.class);
 
@@ -89,7 +88,7 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
 
   private final int rpcTimeout;
 
-  private final RpcClient rpcClient;
+  protected final RpcClient rpcClient;
 
   final RpcControllerFactory rpcControllerFactory;
 
@@ -218,16 +217,10 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
   }
 
   // ditto
-  @Override
-  public NonceGenerator getNonceGenerator() {
+  NonceGenerator getNonceGenerator() {
     return nonceGenerator;
   }
 
-  @Override
-  public RpcClient getRpcClient() {
-    return rpcClient;
-  }
-
   private ClientService.Interface createRegionServerStub(ServerName serverName) throws IOException {
     return ClientService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout));
   }
@@ -380,16 +373,4 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
   Optional<MetricsConnection> getConnectionMetrics() {
     return metrics;
   }
-
-  @Override
-  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
-    return new AsyncRegionServerAdmin(serverName, this);
-  }
-
-  @Override
-  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
-      boolean writeFlushWALMarker) {
-    RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin();
-    return admin.flushRegionInternal(regionName, writeFlushWALMarker);
-  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
similarity index 72%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index f1f64ca..0ad77ba 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
@@ -49,4 +53,17 @@ public interface AsyncClusterConnection extends AsyncConnection {
    * Flush a region and get the response.
    */
   CompletableFuture<FlushRegionResponse> flush(byte[] regionName, boolean writeFlushWALMarker);
+
+  /**
+   * Replicate wal edits for replica regions. The return value is the edits we skipped, as the
+   * original return value is useless.
+   */
+  CompletableFuture<Long> replay(TableName tableName, byte[] encodedRegionName, byte[] row,
+      List<Entry> entries, int replicaId, int numRetries, long operationTimeoutNs);
+
+  /**
+   * Return all the replicas for a region. Used for region replica replication.
+   */
+  CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
+      boolean reload);
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
new file mode 100644
index 0000000..d61f01f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.net.SocketAddress;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+
+/**
+ * The implementation of AsyncClusterConnection.
+ */
+@InterfaceAudience.Private
+class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection {
+
+  public AsyncClusterConnectionImpl(Configuration conf, AsyncRegistry registry, String clusterId,
+      SocketAddress localAddress, User user) {
+    super(conf, registry, clusterId, localAddress, user);
+  }
+
+  @Override
+  public NonceGenerator getNonceGenerator() {
+    return super.getNonceGenerator();
+  }
+
+  @Override
+  public RpcClient getRpcClient() {
+    return rpcClient;
+  }
+
+  @Override
+  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
+    return new AsyncRegionServerAdmin(serverName, this);
+  }
+
+  @Override
+  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
+      boolean writeFlushWALMarker) {
+    RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin();
+    return admin.flushRegionInternal(regionName, writeFlushWALMarker);
+  }
+
+  @Override
+  public CompletableFuture<Long> replay(TableName tableName, byte[] encodedRegionName, byte[] row,
+      List<Entry> entries, int replicaId, int retries, long operationTimeoutNs) {
+    return new AsyncRegionReplicaReplayRetryingCaller(RETRY_TIMER, this,
+      ConnectionUtils.retries2Attempts(retries), operationTimeoutNs, tableName, encodedRegionName,
+      row, entries, replicaId).call();
+  }
+
+  @Override
+  public CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
+      boolean reload) {
+    return getLocator().getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java
new file mode 100644
index 0000000..91d9502
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicaReplayRetryingCaller.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
+
+/**
+ * For replaying edits for region replica.
+ * <p/>
+ * The main difference here is that, every time after locating, we will check whether the region
+ * name is equal, if not, we will give up, as this usually means the region has been split or
+ * merged, and the new region(s) should already have all the data of the parent region(s).
+ * <p/>
+ * Notice that, the return value is the edits we skipped, as the original response message is not
+ * used at upper layer.
+ */
+@InterfaceAudience.Private
+public class AsyncRegionReplicaReplayRetryingCaller extends AsyncRpcRetryingCaller<Long> {
+
+  private static final Logger LOG =
+    LoggerFactory.getLogger(AsyncRegionReplicaReplayRetryingCaller.class);
+
+  private final TableName tableName;
+
+  private final byte[] encodedRegionName;
+
+  private final byte[] row;
+
+  private final Entry[] entries;
+
+  private final int replicaId;
+
+  public AsyncRegionReplicaReplayRetryingCaller(HashedWheelTimer retryTimer,
+      AsyncClusterConnectionImpl conn, int maxAttempts, long operationTimeoutNs,
+      TableName tableName, byte[] encodedRegionName, byte[] row, List<Entry> entries,
+      int replicaId) {
+    super(retryTimer, conn, ConnectionUtils.getPriority(tableName), conn.connConf.getPauseNs(),
+      conn.connConf.getPauseForCQTBENs(), maxAttempts, operationTimeoutNs,
+      conn.connConf.getWriteRpcTimeoutNs(), conn.connConf.getStartLogErrorsCnt());
+    this.tableName = tableName;
+    this.encodedRegionName = encodedRegionName;
+    this.row = row;
+    this.entries = entries.toArray(new Entry[0]);
+    this.replicaId = replicaId;
+  }
+
+  private void call(HRegionLocation loc) {
+    if (!Bytes.equals(encodedRegionName, loc.getRegion().getEncodedNameAsBytes())) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+          "Skipping {} entries in table {} because located region {} is different than" +
+            " the original region {} from WALEdit",
+          entries.length, tableName, loc.getRegion().getEncodedName(),
+          Bytes.toStringBinary(encodedRegionName));
+        for (Entry entry : entries) {
+          LOG.trace("Skipping : " + entry);
+        }
+      }
+      future.complete(Long.valueOf(entries.length));
+      return;
+    }
+
+    AdminService.Interface stub;
+    try {
+      stub = conn.getAdminStub(loc.getServerName());
+    } catch (IOException e) {
+      onError(e,
+        () -> "Get async admin stub to " + loc.getServerName() + " for '" +
+          Bytes.toStringBinary(row) + "' in " + loc.getRegion().getEncodedName() + " of " +
+          tableName + " failed",
+        err -> conn.getLocator().updateCachedLocationOnError(loc, err));
+      return;
+    }
+    Pair<ReplicateWALEntryRequest, CellScanner> p = ReplicationProtbufUtil
+      .buildReplicateWALEntryRequest(entries, encodedRegionName, null, null, null);
+    resetCallTimeout();
+    controller.setCellScanner(p.getSecond());
+    stub.replay(controller, p.getFirst(), r -> {
+      if (controller.failed()) {
+        onError(controller.getFailed(),
+          () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " +
+            loc.getRegion().getEncodedName() + " of " + tableName + " failed",
+          err -> conn.getLocator().updateCachedLocationOnError(loc, err));
+      } else {
+        future.complete(0L);
+      }
+    });
+
+  }
+
+  @Override
+  protected void doCall() {
+    long locateTimeoutNs;
+    if (operationTimeoutNs > 0) {
+      locateTimeoutNs = remainingTimeNs();
+      if (locateTimeoutNs <= 0) {
+        completeExceptionally();
+        return;
+      }
+    } else {
+      locateTimeoutNs = -1L;
+    }
+    addListener(conn.getLocator().getRegionLocation(tableName, row, replicaId,
+      RegionLocateType.CURRENT, locateTimeoutNs), (loc, error) -> {
+        if (error != null) {
+          onError(error,
+            () -> "Locate '" + Bytes.toStringBinary(row) + "' in " + tableName + " failed", err -> {
+            });
+          return;
+        }
+        call(loc);
+      });
+  }
+}
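
The class javadoc above captures the caller's key rule: after each location lookup, the encoded region name recorded in the WAL edits is compared with the located replica's, and on a mismatch the edits are counted as skipped rather than replayed, since the region has been split or merged and its daughters already hold the data. A tiny sketch of that decision, with illustrative names and the replay RPC stubbed out:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class ReplaySkipCheckSketch {
      // Return the number of edits skipped; 0 means the edits would actually be replayed.
      static long replayOrSkip(byte[] walEncodedRegionName, byte[] locatedEncodedRegionName,
          int pendingEdits) {
        if (!Arrays.equals(walEncodedRegionName, locatedEncodedRegionName)) {
          // Region was split or merged; the new region(s) already contain this data.
          return pendingEdits; // mirrors future.complete(entries.length) in the caller above
        }
        // ...here the real caller issues the replay RPC and completes the future with 0L.
        return 0L;
      }

      public static void main(String[] args) {
        byte[] a = "region-a".getBytes(StandardCharsets.UTF_8);
        byte[] b = "region-b".getBytes(StandardCharsets.UTF_8);
        System.out.println(replayOrSkip(a, b, 10)); // 10 edits skipped
        System.out.println(replayOrSkip(a, a, 10)); // 0, edits replayed
      }
    }
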
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
similarity index 99%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
index b9141a9..d491890 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
@@ -164,8 +164,9 @@ public class AsyncRegionServerAdmin {
       cellScanner);
   }
 
-  public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request) {
-    return call((stub, controller, done) -> stub.replay(controller, request, done));
+  public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request,
+      CellScanner cellScanner) {
+    return call((stub, controller, done) -> stub.replay(controller, request, done), cellScanner);
   }
 
   public CompletableFuture<RollWALWriterResponse> rollWALWriter(RollWALWriterRequest request) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
similarity index 95%
rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
index 79484db..2670420 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -46,6 +46,6 @@ public final class ClusterConnectionFactory {
       SocketAddress localAddress, User user) throws IOException {
     AsyncRegistry registry = AsyncRegistryFactory.getRegistry(conf);
     String clusterId = FutureUtils.get(registry.getClusterId());
-    return new AsyncConnectionImpl(conf, registry, clusterId, localAddress, user);
+    return new AsyncClusterConnectionImpl(conf, registry, clusterId, localAddress, user);
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 9f41a76..c39c86c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -37,7 +37,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 
 @InterfaceAudience.Private
@@ -55,20 +56,18 @@ public class ReplicationProtbufUtil {
   public static void replicateWALEntry(AsyncRegionServerAdmin admin, Entry[] entries,
       String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir)
       throws IOException {
-    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(
-      entries, null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
+    Pair<ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(entries, null,
+      replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
     FutureUtils.get(admin.replicateWALEntry(p.getFirst(), p.getSecond()));
   }
 
   /**
    * Create a new ReplicateWALEntryRequest from a list of WAL entries
-   *
    * @param entries the WAL entries to be replicated
-   * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values
-   * found.
+   * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found.
    */
-  public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>
-      buildReplicateWALEntryRequest(final Entry[] entries) throws IOException {
+  public static Pair<ReplicateWALEntryRequest, CellScanner> buildReplicateWALEntryRequest(
+      final Entry[] entries) {
     return buildReplicateWALEntryRequest(entries, null, null, null, null);
   }
 
@@ -82,16 +81,14 @@ public class ReplicationProtbufUtil {
    * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
    * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found.
    */
-  public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>
-      buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName,
-          String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir)
-          throws IOException {
+  public static Pair<ReplicateWALEntryRequest, CellScanner> buildReplicateWALEntryRequest(
+      final Entry[] entries, byte[] encodedRegionName, String replicationClusterId,
+      Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) {
     // Accumulate all the Cells seen in here.
     List<List<? extends Cell>> allCells = new ArrayList<>(entries.length);
     int size = 0;
-    AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder();
-    AdminProtos.ReplicateWALEntryRequest.Builder builder =
-      AdminProtos.ReplicateWALEntryRequest.newBuilder();
+    WALEntry.Builder entryBuilder = WALEntry.newBuilder();
+    ReplicateWALEntryRequest.Builder builder = ReplicateWALEntryRequest.newBuilder();
 
     for (Entry entry: entries) {
       entryBuilder.clear();
@@ -99,8 +96,8 @@ public class ReplicationProtbufUtil {
       try {
         keyBuilder = entry.getKey().getBuilder(WALCellCodec.getNoneCompressor());
       } catch (IOException e) {
-        throw new IOException(
-            "There should not throw exception since NoneCompressor do not throw any exceptions", e);
+        throw new AssertionError(
+          "There should not throw exception since NoneCompressor do not throw any exceptions", e);
       }
       if(encodedRegionName != null){
         keyBuilder.setEncodedRegionName(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index 0729203..cc798cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -185,7 +185,6 @@ public class RegionReplicaFlushHandler extends EventHandler {
             "Was not able to trigger a flush from primary region due to old server version? " +
               "Continuing to open the secondary region replica: " +
               region.getRegionInfo().getRegionNameAsString());
-          region.setReadsEnabled(true);
           break;
         }
       }
@@ -195,6 +194,6 @@ public class RegionReplicaFlushHandler extends EventHandler {
         throw new InterruptedIOException(e.getMessage());
       }
     }
+    region.setReadsEnabled(true);
   }
-
 }
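
The handler change above hoists region.setReadsEnabled(true) out of the retry loop, so reads are enabled once after the loop on every successful exit path rather than only on the in-loop break for old-version primaries. A minimal control-flow sketch of that refactoring (the flush attempt is faked and all names are illustrative):

    public class EnableReadsAfterLoopSketch {
      // Pretend the primary only acknowledges the flush on the third attempt.
      static boolean triggerFlush(int attempt) {
        return attempt >= 2;
      }

      static void openReplica() {
        for (int attempt = 0; attempt < 10; attempt++) {
          if (triggerFlush(attempt)) {
            break; // no setReadsEnabled(true) inside the loop any more
          }
        }
        System.out.println("reads enabled"); // the single place reads are turned on
      }

      public static void main(String[] args) {
        openReplica();
      }
    }
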
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
index f4c37b1..ca73663 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
@@ -29,6 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
@@ -53,6 +54,7 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener {
 
   @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
   class Context {
+    private final Server server;
     private final Configuration localConf;
     private final Configuration conf;
     private final FileSystem fs;
@@ -64,16 +66,11 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener {
     private final Abortable abortable;
 
     @InterfaceAudience.Private
-    public Context(
-        final Configuration localConf,
-        final Configuration conf,
-        final FileSystem fs,
-        final String peerId,
-        final UUID clusterId,
-        final ReplicationPeer replicationPeer,
-        final MetricsSource metrics,
-        final TableDescriptors tableDescriptors,
-        final Abortable abortable) {
+    public Context(final Server server, final Configuration localConf, final Configuration conf,
+        final FileSystem fs, final String peerId, final UUID clusterId,
+        final ReplicationPeer replicationPeer, final MetricsSource metrics,
+        final TableDescriptors tableDescriptors, final Abortable abortable) {
+      this.server = server;
       this.localConf = localConf;
       this.conf = conf;
       this.fs = fs;
@@ -84,34 +81,50 @@ public interface ReplicationEndpoint extends ReplicationPeerConfigListener {
       this.tableDescriptors = tableDescriptors;
       this.abortable = abortable;
     }
+
+    public Server getServer() {
+      return server;
+    }
+
     public Configuration getConfiguration() {
       return conf;
     }
+
     public Configuration getLocalConfiguration() {
       return localConf;
     }
+
     public FileSystem getFilesystem() {
       return fs;
     }
+
     public UUID getClusterId() {
       return clusterId;
     }
+
     public String getPeerId() {
       return peerId;
     }
+
     public ReplicationPeerConfig getPeerConfig() {
       return replicationPeer.getPeerConfig();
     }
+
     public ReplicationPeer getReplicationPeer() {
       return replicationPeer;
     }
+
     public MetricsSource getMetrics() {
       return metrics;
     }
+
     public TableDescriptors getTableDescriptors() {
       return tableDescriptors;
     }
-    public Abortable getAbortable() { return abortable; }
+
+    public Abortable getAbortable() {
+      return abortable;
+    }
   }
 
   /**
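
The Context change above gives every replication endpoint a Server handle, so init() can pick up the region server's shared AsyncClusterConnection (as RegionReplicaReplicationEndpoint does further down via context.getServer().getAsyncClusterConnection()) instead of opening a connection of its own. A bare-bones sketch of that wiring, with stand-in interfaces rather than the real Server and AsyncClusterConnection types:

    public class ContextServerSketch {
      interface ConnStandIn { }
      interface ServerStandIn {
        ConnStandIn getAsyncClusterConnection();
      }

      // Carries the server handle into the endpoint, like ReplicationEndpoint.Context above.
      static final class Context {
        private final ServerStandIn server;
        Context(ServerStandIn server) { this.server = server; }
        ServerStandIn getServer() { return server; }
      }

      static final class Endpoint {
        private ConnStandIn connection;
        void init(Context context) {
          // Reuse the server's shared connection instead of creating a new one.
          this.connection = context.getServer().getAsyncClusterConnection();
        }
      }

      public static void main(String[] args) {
        ServerStandIn server = () -> new ConnStandIn() { };
        Endpoint endpoint = new Endpoint();
        endpoint.init(new Context(server));
        System.out.println("endpoint initialised with shared connection");
      }
    }
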
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index f7721e0..65cf9a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -19,67 +19,47 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.Callable;
+import java.util.Optional;
+import java.util.TreeMap;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.RetryingCallable;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
+import org.apache.hadoop.hbase.util.AtomicUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
-import org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
-import org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
-import org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
-import org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
-import org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
 import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
+import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
+import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
 
 /**
- * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint
- * which receives the WAL edits from the WAL, and sends the edits to replicas
- * of regions.
+ * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint which receives the WAL
+ * edits from the WAL, and sends the edits to replicas of regions.
  */
 @InterfaceAudience.Private
 public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
@@ -87,32 +67,55 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
   private static final Logger LOG = LoggerFactory.getLogger(RegionReplicaReplicationEndpoint.class);
 
   // Can be configured differently than hbase.client.retries.number
-  private static String CLIENT_RETRIES_NUMBER
-    = "hbase.region.replica.replication.client.retries.number";
+  private static String CLIENT_RETRIES_NUMBER =
+    "hbase.region.replica.replication.client.retries.number";
 
   private Configuration conf;
-  private ClusterConnection connection;
+  private AsyncClusterConnection connection;
   private TableDescriptors tableDescriptors;
 
-  // Reuse WALSplitter constructs as a WAL pipe
-  private PipelineController controller;
-  private RegionReplicaOutputSink outputSink;
-  private EntryBuffers entryBuffers;
+  private int numRetries;
+
+  private long operationTimeoutNs;
 
-  // Number of writer threads
-  private int numWriterThreads;
+  private LoadingCache<TableName, Optional<TableDescriptor>> tableDescriptorCache;
 
-  private int operationTimeout;
+  private Cache<TableName, TableName> disabledTableCache;
 
-  private ExecutorService pool;
+  private final RetryCounterFactory retryCounterFactory =
+    new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60000);
 
   @Override
   public void init(Context context) throws IOException {
     super.init(context);
-
-    this.conf = HBaseConfiguration.create(context.getConfiguration());
+    this.conf = context.getConfiguration();
     this.tableDescriptors = context.getTableDescriptors();
-
+    int memstoreReplicationEnabledCacheExpiryMs = conf
+      .getInt("hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000);
+    // A cache for the table "memstore replication enabled" flag.
+    // It has a default expiry of 5 sec. This means that if the table is altered
+    // with a different flag value, we might fail to replicate for that amount of
+    // time. But this cache avoids the slow lookup and parsing of the TableDescriptor.
+    tableDescriptorCache = CacheBuilder.newBuilder()
+      .expireAfterWrite(memstoreReplicationEnabledCacheExpiryMs, TimeUnit.MILLISECONDS)
+      .initialCapacity(10).maximumSize(1000)
+      .build(new CacheLoader<TableName, Optional<TableDescriptor>>() {
+
+        @Override
+        public Optional<TableDescriptor> load(TableName tableName) throws Exception {
+          // check if the table requires memstore replication
+          // some unit tests drop the table, so we should do a bypass check and always replicate.
+          return Optional.ofNullable(tableDescriptors.get(tableName));
+        }
+      });
+    int nonExistentTableCacheExpiryMs =
+      conf.getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
+    // A cache for non-existent tables that have a default expiry of 5 sec. This means that if the
+    // table is created again with the same name, we might fail to replicate for that amount of
+    // time. But this cache prevents overloading meta requests for every edit from a deleted file.
+    disabledTableCache = CacheBuilder.newBuilder()
+      .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS).initialCapacity(10)
+      .maximumSize(1000).build();
     // HRS multiplies client retries by 10 globally for meta operations, but we do not want this.
     // We are resetting it here because we want default number of retries (35) rather than 10 times
     // that which makes very long retries for disabled tables etc.
@@ -123,516 +126,261 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
         HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
       defaultNumRetries = defaultNumRetries / mult; // reset if HRS has multiplied this already
     }
-
-    conf.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 1);
-    int numRetries = conf.getInt(CLIENT_RETRIES_NUMBER, defaultNumRetries);
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries);
-
-    this.numWriterThreads = this.conf.getInt(
-      "hbase.region.replica.replication.writer.threads", 3);
-    controller = new PipelineController();
-    entryBuffers = new EntryBuffers(controller,
-        this.conf.getLong("hbase.region.replica.replication.buffersize", 128 * 1024 * 1024));
-
+    this.numRetries = conf.getInt(CLIENT_RETRIES_NUMBER, defaultNumRetries);
     // use the regular RPC timeout for replica replication RPC's
-    this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-  }
-
-  @Override
-  protected void doStart() {
-    try {
-      connection = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
-      this.pool = getDefaultThreadPool(conf);
-      outputSink = new RegionReplicaOutputSink(controller, tableDescriptors, entryBuffers,
-        connection, pool, numWriterThreads, operationTimeout);
-      outputSink.startWriterThreads();
-      super.doStart();
-    } catch (IOException ex) {
-      LOG.warn("Received exception while creating connection :" + ex);
-      notifyFailed(ex);
-    }
-  }
-
-  @Override
-  protected void doStop() {
-    if (outputSink != null) {
-      try {
-        outputSink.finishWritingAndClose();
-      } catch (IOException ex) {
-        LOG.warn("Got exception while trying to close OutputSink", ex);
-      }
-    }
-    if (this.pool != null) {
-      this.pool.shutdownNow();
-      try {
-        // wait for 10 sec
-        boolean shutdown = this.pool.awaitTermination(10000, TimeUnit.MILLISECONDS);
-        if (!shutdown) {
-          LOG.warn("Failed to shutdown the thread pool after 10 seconds");
-        }
-      } catch (InterruptedException e) {
-        LOG.warn("Got interrupted while waiting for the thread pool to shut down" + e);
-      }
-    }
-    if (connection != null) {
-      try {
-        connection.close();
-      } catch (IOException ex) {
-        LOG.warn("Got exception closing connection :" + ex);
-      }
-    }
-    super.doStop();
+    this.operationTimeoutNs =
+      TimeUnit.MILLISECONDS.toNanos(conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
+    this.connection = context.getServer().getAsyncClusterConnection();
   }
 
   /**
-   * Returns a Thread pool for the RPC's to region replicas. Similar to
-   * Connection's thread pool.
+   * Returns true if the specified entry must be replicated. We should always replicate meta
+   * operations (e.g. flush) and use the user HTD flag to decide whether or not to replicate the
+   * memstore.
    */
-  private ExecutorService getDefaultThreadPool(Configuration conf) {
-    int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
-    if (maxThreads == 0) {
-      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
+  private boolean requiresReplication(Optional<TableDescriptor> tableDesc, Entry entry) {
+    // empty edit does not need to be replicated
+    if (entry.getEdit().isEmpty() || !tableDesc.isPresent()) {
+      return false;
     }
-    long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
-    LinkedBlockingQueue<Runnable> workQueue =
-        new LinkedBlockingQueue<>(maxThreads *
-            conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-              HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
-      maxThreads,
-      maxThreads,
-      keepAliveTime,
-      TimeUnit.SECONDS,
-      workQueue,
-      Threads.newDaemonThreadFactory(this.getClass().getSimpleName() + "-rpc-shared-"));
-    tpe.allowCoreThreadTimeOut(true);
-    return tpe;
+    // meta edits (e.g. flush) must always be replicated
+    return entry.getEdit().isMetaEdit() || tableDesc.get().hasRegionMemStoreReplication();
   }
 
-  @Override
-  public boolean replicate(ReplicateContext replicateContext) {
-    /* A note on batching in RegionReplicaReplicationEndpoint (RRRE):
-     *
-     * RRRE relies on batching from two different mechanisms. The first is the batching from
-     * ReplicationSource since RRRE is a ReplicationEndpoint driven by RS. RS reads from a single
-     * WAL file filling up a buffer of heap size "replication.source.size.capacity"(64MB) or at most
-     * "replication.source.nb.capacity" entries or until it sees the end of file (in live tailing).
-     * Then RS passes all the buffered edits in this replicate() call context. RRRE puts the edits
-     * to the WALSplitter.EntryBuffers which is a blocking buffer space of up to
-     * "hbase.region.replica.replication.buffersize" (128MB) in size. This buffer splits the edits
-     * based on regions.
-     *
-     * There are "hbase.region.replica.replication.writer.threads"(default 3) writer threads which
-     * pick largest per-region buffer and send it to the SinkWriter (see RegionReplicaOutputSink).
-     * The SinkWriter in this case will send the wal edits to all secondary region replicas in
-     * parallel via a retrying rpc call. EntryBuffers guarantees that while a buffer is
-     * being written to the sink, another buffer for the same region will not be made available to
-     * writers ensuring regions edits are not replayed out of order.
-     *
-     * The replicate() call won't return until all the buffers are sent and ack'd by the sinks so
-     * that the replication can assume all edits are persisted. We may be able to do a better
-     * pipelining between the replication thread and output sinks later if it becomes a bottleneck.
-     */
-
-    while (this.isRunning()) {
-      try {
-        for (Entry entry: replicateContext.getEntries()) {
-          entryBuffers.appendEntry(entry);
+  private void getRegionLocations(CompletableFuture<RegionLocations> future,
+      TableDescriptor tableDesc, byte[] encodedRegionName, byte[] row, boolean reload) {
+    FutureUtils.addListener(connection.getRegionLocations(tableDesc.getTableName(), row, reload),
+      (r, e) -> {
+        if (e != null) {
+          future.completeExceptionally(e);
+          return;
         }
-        outputSink.flush(); // make sure everything is flushed
-        ctx.getMetrics().incrLogEditsFiltered(
-          outputSink.getSkippedEditsCounter().getAndSet(0));
-        return true;
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        return false;
-      } catch (IOException e) {
-        LOG.warn("Received IOException while trying to replicate"
-            + StringUtils.stringifyException(e));
-      }
-    }
-
-    return false;
-  }
-
-  @Override
-  public boolean canReplicateToSameCluster() {
-    return true;
-  }
-
-  @Override
-  protected WALEntryFilter getScopeWALEntryFilter() {
-    // we do not care about scope. We replicate everything.
-    return null;
+        // if we are not loading from cache, just return
+        if (reload) {
+          future.complete(r);
+          return;
+        }
+        // check if the number of region replicas is correct, and also the primary region name
+        // matches
+        if (r.size() == tableDesc.getRegionReplication() && Bytes.equals(
+          r.getDefaultRegionLocation().getRegion().getEncodedNameAsBytes(), encodedRegionName)) {
+          future.complete(r);
+        } else {
+          // reload again as the information in cache may be stale
+          getRegionLocations(future, tableDesc, encodedRegionName, row, true);
+        }
+      });
   }
 
-  static class RegionReplicaOutputSink extends OutputSink {
-    private final RegionReplicaSinkWriter sinkWriter;
-    private final TableDescriptors tableDescriptors;
-    private final Cache<TableName, Boolean> memstoreReplicationEnabled;
-
-    public RegionReplicaOutputSink(PipelineController controller, TableDescriptors tableDescriptors,
-        EntryBuffers entryBuffers, ClusterConnection connection, ExecutorService pool,
-        int numWriters, int operationTimeout) {
-      super(controller, entryBuffers, numWriters);
-      this.sinkWriter =
-          new RegionReplicaSinkWriter(this, connection, pool, operationTimeout, tableDescriptors);
-      this.tableDescriptors = tableDescriptors;
-
-      // A cache for the table "memstore replication enabled" flag.
-      // It has a default expiry of 5 sec. This means that if the table is altered
-      // with a different flag value, we might miss to replicate for that amount of
-      // time. But this cache avoid the slow lookup and parsing of the TableDescriptor.
-      int memstoreReplicationEnabledCacheExpiryMs = connection.getConfiguration()
-        .getInt("hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000);
-      this.memstoreReplicationEnabled = CacheBuilder.newBuilder()
-        .expireAfterWrite(memstoreReplicationEnabledCacheExpiryMs, TimeUnit.MILLISECONDS)
-        .initialCapacity(10)
-        .maximumSize(1000)
-        .build();
+  private void replicate(CompletableFuture<Long> future, RegionLocations locs,
+      TableDescriptor tableDesc, byte[] encodedRegionName, byte[] row, List<Entry> entries) {
+    if (locs.size() == 1) {
+      // Could this happen?
+      future.complete(Long.valueOf(entries.size()));
+      return;
     }
-
-    @Override
-    public void append(RegionEntryBuffer buffer) throws IOException {
-      List<Entry> entries = buffer.getEntryBuffer();
-
-      if (entries.isEmpty() || entries.get(0).getEdit().getCells().isEmpty()) {
-        return;
-      }
-
-      // meta edits (e.g. flush) are always replicated.
-      // data edits (e.g. put) are replicated if the table requires them.
-      if (!requiresReplication(buffer.getTableName(), entries)) {
-        return;
+    if (!Bytes.equals(locs.getDefaultRegionLocation().getRegion().getEncodedNameAsBytes(),
+      encodedRegionName)) {
+      // the region name is not equal, this usually means the region has been split or merged, so
+      // give up replicating as the new region(s) should already have all the data of the parent
+      // region(s).
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+          "Skipping {} entries in table {} because located region {} is different than" +
+            " the original region {} from WALEdit",
+          entries.size(), tableDesc.getTableName(),
+          locs.getDefaultRegionLocation().getRegion().getEncodedName(),
+          Bytes.toStringBinary(encodedRegionName));
       }
-
-      sinkWriter.append(buffer.getTableName(), buffer.getEncodedRegionName(),
-        CellUtil.cloneRow(entries.get(0).getEdit().getCells().get(0)), entries);
-    }
-
-    @Override
-    public boolean flush() throws IOException {
-      // nothing much to do for now. Wait for the Writer threads to finish up
-      // append()'ing the data.
-      entryBuffers.waitUntilDrained();
-      return super.flush();
-    }
-
-    @Override
-    public boolean keepRegionEvent(Entry entry) {
-      return true;
-    }
-
-    @Override
-    public List<Path> finishWritingAndClose() throws IOException {
-      finishWriting(true);
-      return null;
-    }
-
-    @Override
-    public Map<byte[], Long> getOutputCounts() {
-      return null; // only used in tests
-    }
-
-    @Override
-    public int getNumberOfRecoveredRegions() {
-      return 0;
-    }
-
-    AtomicLong getSkippedEditsCounter() {
-      return skippedEdits;
+      future.complete(Long.valueOf(entries.size()));
+      return;
     }
-
-    /**
-     * returns true if the specified entry must be replicated.
-     * We should always replicate meta operations (e.g. flush)
-     * and use the user HTD flag to decide whether or not replicate the memstore.
-     */
-    private boolean requiresReplication(final TableName tableName, final List<Entry> entries)
-        throws IOException {
-      // unit-tests may not the TableDescriptors, bypass the check and always replicate
-      if (tableDescriptors == null) return true;
-
-      Boolean requiresReplication = memstoreReplicationEnabled.getIfPresent(tableName);
-      if (requiresReplication == null) {
-        // check if the table requires memstore replication
-        // some unit-test drop the table, so we should do a bypass check and always replicate.
-        TableDescriptor htd = tableDescriptors.get(tableName);
-        requiresReplication = htd == null || htd.hasRegionMemStoreReplication();
-        memstoreReplicationEnabled.put(tableName, requiresReplication);
-      }
-
-      // if memstore replication is not required, check the entries.
-      // meta edits (e.g. flush) must be always replicated.
-      if (!requiresReplication) {
-        int skipEdits = 0;
-        java.util.Iterator<Entry> it = entries.iterator();
-        while (it.hasNext()) {
-          Entry entry = it.next();
-          if (entry.getEdit().isMetaEdit()) {
-            requiresReplication = true;
+    AtomicReference<Throwable> error = new AtomicReference<>();
+    AtomicInteger remainingTasks = new AtomicInteger(locs.size() - 1);
+    AtomicLong skippedEdits = new AtomicLong(0);
+
+    for (int i = 1, n = locs.size(); i < n; i++) {
+      final int replicaId = i;
+      FutureUtils.addListener(connection.replay(tableDesc.getTableName(),
+        locs.getRegionLocation(replicaId).getRegion().getEncodedNameAsBytes(), row, entries,
+        replicaId, numRetries, operationTimeoutNs), (r, e) -> {
+          if (e != null) {
+            LOG.warn("Failed to replicate to {}", locs.getRegionLocation(replicaId), e);
+            error.compareAndSet(null, e);
           } else {
-            it.remove();
-            skipEdits++;
+            AtomicUtils.updateMax(skippedEdits, r.longValue());
           }
-        }
-        skippedEdits.addAndGet(skipEdits);
-      }
-      return requiresReplication;
+          if (remainingTasks.decrementAndGet() == 0) {
+            if (error.get() != null) {
+              future.completeExceptionally(error.get());
+            } else {
+              future.complete(skippedEdits.get());
+            }
+          }
+        });
     }
   }
 
-  static class RegionReplicaSinkWriter extends SinkWriter {
-    RegionReplicaOutputSink sink;
-    ClusterConnection connection;
-    RpcControllerFactory rpcControllerFactory;
-    RpcRetryingCallerFactory rpcRetryingCallerFactory;
-    int operationTimeout;
-    ExecutorService pool;
-    Cache<TableName, Boolean> disabledAndDroppedTables;
-    TableDescriptors tableDescriptors;
-
-    public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection,
-        ExecutorService pool, int operationTimeout, TableDescriptors tableDescriptors) {
-      this.sink = sink;
-      this.connection = connection;
-      this.operationTimeout = operationTimeout;
-      this.rpcRetryingCallerFactory
-        = RpcRetryingCallerFactory.instantiate(connection.getConfiguration());
-      this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration());
-      this.pool = pool;
-      this.tableDescriptors = tableDescriptors;
-
-      int nonExistentTableCacheExpiryMs = connection.getConfiguration()
-        .getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
-      // A cache for non existing tables that have a default expiry of 5 sec. This means that if the
-      // table is created again with the same name, we might miss to replicate for that amount of
-      // time. But this cache prevents overloading meta requests for every edit from a deleted file.
-      disabledAndDroppedTables = CacheBuilder.newBuilder()
-        .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS)
-        .initialCapacity(10)
-        .maximumSize(1000)
-        .build();
+  private void logSkipped(TableName tableName, List<Entry> entries, String reason) {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Skipping {} entries because table {} is {}", entries.size(), tableName, reason);
+      for (Entry entry : entries) {
+        LOG.trace("Skipping : {}", entry);
+      }
     }
+  }
 
-    public void append(TableName tableName, byte[] encodedRegionName, byte[] row,
-        List<Entry> entries) throws IOException {
-
-      if (disabledAndDroppedTables.getIfPresent(tableName) != null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Skipping " + entries.size() + " entries because table " + tableName
-            + " is cached as a disabled or dropped table");
-          for (Entry entry : entries) {
-            LOG.trace("Skipping : " + entry);
-          }
-        }
-        sink.getSkippedEditsCounter().addAndGet(entries.size());
-        return;
+  private CompletableFuture<Long> replicate(TableDescriptor tableDesc, byte[] encodedRegionName,
+      List<Entry> entries) {
+    if (disabledTableCache.getIfPresent(tableDesc.getTableName()) != null) {
+      logSkipped(tableDesc.getTableName(), entries, "cached as a disabled table");
+      return CompletableFuture.completedFuture(Long.valueOf(entries.size()));
+    }
+    byte[] row = CellUtil.cloneRow(entries.get(0).getEdit().getCells().get(0));
+    CompletableFuture<RegionLocations> locateFuture = new CompletableFuture<>();
+    getRegionLocations(locateFuture, tableDesc, encodedRegionName, row, false);
+    CompletableFuture<Long> future = new CompletableFuture<>();
+    FutureUtils.addListener(locateFuture, (locs, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+      } else {
+        replicate(future, locs, tableDesc, encodedRegionName, row, entries);
       }
+    });
+    return future;
+  }
 
-      // If the table is disabled or dropped, we should not replay the entries, and we can skip
-      // replaying them. However, we might not know whether the table is disabled until we
-      // invalidate the cache and check from meta
-      RegionLocations locations = null;
-      boolean useCache = true;
-      while (true) {
-        // get the replicas of the primary region
+  @Override
+  public boolean replicate(ReplicateContext replicateContext) {
+    Map<byte[], Pair<TableDescriptor, List<Entry>>> encodedRegionName2Entries =
+      new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    long skippedEdits = 0;
+    RetryCounter retryCounter = retryCounterFactory.create();
+    outer: while (isRunning()) {
+      encodedRegionName2Entries.clear();
+      skippedEdits = 0;
+      for (Entry entry : replicateContext.getEntries()) {
+        Optional<TableDescriptor> tableDesc;
         try {
-          locations = RegionReplicaReplayCallable
-              .getRegionLocations(connection, tableName, row, useCache, 0);
-
-          if (locations == null) {
-            throw new HBaseIOException("Cannot locate locations for "
-                + tableName + ", row:" + Bytes.toStringBinary(row));
+          tableDesc = tableDescriptorCache.get(entry.getKey().getTableName());
+        } catch (ExecutionException e) {
+          LOG.warn("Failed to load table descriptor for {}, attempts={}",
+            entry.getKey().getTableName(), retryCounter.getAttemptTimes(), e.getCause());
+          if (!retryCounter.shouldRetry()) {
+            return false;
           }
-        } catch (TableNotFoundException e) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("Skipping " + entries.size() + " entries because table " + tableName
-              + " is dropped. Adding table to cache.");
-            for (Entry entry : entries) {
-              LOG.trace("Skipping : " + entry);
-            }
+          try {
+            retryCounter.sleepUntilNextRetry();
+          } catch (InterruptedException e1) {
+            // restore the interrupted state
+            Thread.currentThread().interrupt();
+            return false;
           }
-          disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache. Value ignored
-          // skip this entry
-          sink.getSkippedEditsCounter().addAndGet(entries.size());
-          return;
+          continue outer;
         }
-
-        // check whether we should still replay this entry. If the regions are changed, or the
-        // entry is not coming from the primary region, filter it out.
-        HRegionLocation primaryLocation = locations.getDefaultRegionLocation();
-        if (!Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(),
-          encodedRegionName)) {
-          if (useCache) {
-            useCache = false;
-            continue; // this will retry location lookup
-          }
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("Skipping " + entries.size() + " entries in table " + tableName
-              + " because located region " + primaryLocation.getRegionInfo().getEncodedName()
-              + " is different than the original region " + Bytes.toStringBinary(encodedRegionName)
-              + " from WALEdit");
-            for (Entry entry : entries) {
-              LOG.trace("Skipping : " + entry);
-            }
-          }
-          sink.getSkippedEditsCounter().addAndGet(entries.size());
-          return;
+        if (!requiresReplication(tableDesc, entry)) {
+          skippedEdits++;
+          continue;
         }
-        break;
+        byte[] encodedRegionName = entry.getKey().getEncodedRegionName();
+        encodedRegionName2Entries
+          .computeIfAbsent(encodedRegionName, k -> Pair.newPair(tableDesc.get(), new ArrayList<>()))
+          .getSecond().add(entry);
       }
-
-      if (locations.size() == 1) {
-        return;
-      }
-
-      ArrayList<Future<ReplicateWALEntryResponse>> tasks = new ArrayList<>(locations.size() - 1);
-
-      // All passed entries should belong to one region because it is coming from the EntryBuffers
-      // split per region. But the regions might split and merge (unlike log recovery case).
-      for (int replicaId = 0; replicaId < locations.size(); replicaId++) {
-        HRegionLocation location = locations.getRegionLocation(replicaId);
-        if (!RegionReplicaUtil.isDefaultReplica(replicaId)) {
-          RegionInfo regionInfo = location == null
-              ? RegionReplicaUtil.getRegionInfoForReplica(
-                locations.getDefaultRegionLocation().getRegionInfo(), replicaId)
-              : location.getRegionInfo();
-          RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection,
-            rpcControllerFactory, tableName, location, regionInfo, row, entries,
-            sink.getSkippedEditsCounter());
-           Future<ReplicateWALEntryResponse> task = pool.submit(
-             new RetryingRpcCallable<>(rpcRetryingCallerFactory, callable, operationTimeout));
-           tasks.add(task);
-        }
+      break;
+    }
+    // send the request to regions
+    retryCounter = retryCounterFactory.create();
+    while (isRunning()) {
+      List<Pair<CompletableFuture<Long>, byte[]>> futureAndEncodedRegionNameList =
+        new ArrayList<Pair<CompletableFuture<Long>, byte[]>>();
+      for (Map.Entry<byte[], Pair<TableDescriptor, List<Entry>>> entry : encodedRegionName2Entries
+        .entrySet()) {
+        CompletableFuture<Long> future =
+          replicate(entry.getValue().getFirst(), entry.getKey(), entry.getValue().getSecond());
+        futureAndEncodedRegionNameList.add(Pair.newPair(future, entry.getKey()));
       }
-
-      boolean tasksCancelled = false;
-      for (int replicaId = 0; replicaId < tasks.size(); replicaId++) {
+      for (Pair<CompletableFuture<Long>, byte[]> pair : futureAndEncodedRegionNameList) {
+        byte[] encodedRegionName = pair.getSecond();
         try {
-          tasks.get(replicaId).get();
+          skippedEdits += pair.getFirst().get().longValue();
+          encodedRegionName2Entries.remove(encodedRegionName);
         } catch (InterruptedException e) {
-          throw new InterruptedIOException(e.getMessage());
+          // restore the interrupted state
+          Thread.currentThread().interrupt();
+          return false;
         } catch (ExecutionException e) {
+          Pair<TableDescriptor, List<Entry>> tableAndEntries =
+            encodedRegionName2Entries.get(encodedRegionName);
+          TableName tableName = tableAndEntries.getFirst().getTableName();
+          List<Entry> entries = tableAndEntries.getSecond();
           Throwable cause = e.getCause();
-          boolean canBeSkipped = false;
-          if (cause instanceof IOException) {
-            // The table can be disabled or dropped at this time. For disabled tables, we have no
-            // cheap mechanism to detect this case because meta does not contain this information.
-            // ClusterConnection.isTableDisabled() is a zk call which we cannot do for every replay
-            // RPC. So instead we start the replay RPC with retries and check whether the table is
-            // dropped or disabled which might cause SocketTimeoutException, or
-            // RetriesExhaustedException or similar if we get IOE.
-            if (cause instanceof TableNotFoundException
-                || connection.isTableDisabled(tableName)) {
-              disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later.
-              canBeSkipped = true;
-            } else if (tableDescriptors != null) {
-              TableDescriptor tableDescriptor = tableDescriptors.get(tableName);
-              if (tableDescriptor != null
-                  //(replicaId + 1) as no task is added for primary replica for replication
-                  && tableDescriptor.getRegionReplication() <= (replicaId + 1)) {
-                canBeSkipped = true;
-              }
-            }
-            if (canBeSkipped) {
-              if (LOG.isTraceEnabled()) {
-                LOG.trace("Skipping " + entries.size() + " entries in table " + tableName
-                    + " because received exception for dropped or disabled table",
-                  cause);
-                for (Entry entry : entries) {
-                  LOG.trace("Skipping : " + entry);
-                }
-              }
-              if (!tasksCancelled) {
-                sink.getSkippedEditsCounter().addAndGet(entries.size());
-                tasksCancelled = true; // so that we do not add to skipped counter again
-              }
-              continue;
-            }
-
-            // otherwise rethrow
-            throw (IOException)cause;
+          // The table can be disabled or dropped at this time. For disabled tables, we have no
+          // cheap mechanism to detect this case because meta does not contain this information.
+          // Checking whether a table is disabled needs an extra RPC which we cannot afford for
+          // every replay call. So instead we issue the replay RPC with retries, and only when it
+          // fails (e.g. with SocketTimeoutException or RetriesExhaustedException) do we check
+          // whether the table has been dropped or disabled.
+          if (cause instanceof TableNotFoundException) {
+            // add to cache that the table does not exist
+            tableDescriptorCache.put(tableName, Optional.empty());
+            logSkipped(tableName, entries, "dropped");
+            skippedEdits += entries.size();
+            encodedRegionName2Entries.remove(encodedRegionName);
+            continue;
+          }
+          boolean disabled = false;
+          try {
+            disabled = connection.getAdmin().isTableDisabled(tableName).get();
+          } catch (InterruptedException e1) {
+            // restore the interrupted state
+            Thread.currentThread().interrupt();
+            return false;
+          } catch (ExecutionException e1) {
+            LOG.warn("Failed to test whether {} is disabled, assume it is not disabled", tableName,
+              e1.getCause());
+          }
+          if (disabled) {
+            disabledTableCache.put(tableName, tableName);
+            logSkipped(tableName, entries, "disabled");
+            skippedEdits += entries.size();
+            encodedRegionName2Entries.remove(encodedRegionName);
+            continue;
           }
-          // unexpected exception
-          throw new IOException(cause);
+          LOG.warn("Failed to replicate {} entries for region {} of table {}", entries.size(),
+            Bytes.toStringBinary(encodedRegionName), tableName);
+        }
+      }
+      // we are done
+      if (encodedRegionName2Entries.isEmpty()) {
+        ctx.getMetrics().incrLogEditsFiltered(skippedEdits);
+        return true;
+      } else {
+        LOG.warn("Failed to replicate all entris, retry={}", retryCounter.getAttemptTimes());
+        if (!retryCounter.shouldRetry()) {
+          return false;
+        }
+        try {
+          retryCounter.sleepUntilNextRetry();
+        } catch (InterruptedException e) {
+          // restore the interrupted state
+          Thread.currentThread().interrupt();
+          return false;
         }
       }
     }
-  }
 
-  static class RetryingRpcCallable<V> implements Callable<V> {
-    RpcRetryingCallerFactory factory;
-    RetryingCallable<V> callable;
-    int timeout;
-    public RetryingRpcCallable(RpcRetryingCallerFactory factory, RetryingCallable<V> callable,
-        int timeout) {
-      this.factory = factory;
-      this.callable = callable;
-      this.timeout = timeout;
-    }
-    @Override
-    public V call() throws Exception {
-      return factory.<V>newCaller().callWithRetries(callable, timeout);
-    }
+    return false;
   }
 
-  /**
-   * Calls replay on the passed edits for the given set of entries belonging to the region. It skips
-   * the entry if the region boundaries have changed or the region is gone.
-   */
-  static class RegionReplicaReplayCallable extends
-      RegionAdminServiceCallable<ReplicateWALEntryResponse> {
-    private final List<Entry> entries;
-    private final byte[] initialEncodedRegionName;
-    private final AtomicLong skippedEntries;
-
-    public RegionReplicaReplayCallable(ClusterConnection connection,
-        RpcControllerFactory rpcControllerFactory, TableName tableName,
-        HRegionLocation location, RegionInfo regionInfo, byte[] row,List<Entry> entries,
-        AtomicLong skippedEntries) {
-      super(connection, rpcControllerFactory, location, tableName, row, regionInfo.getReplicaId());
-      this.entries = entries;
-      this.skippedEntries = skippedEntries;
-      this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes();
-    }
-
-    @Override
-    public ReplicateWALEntryResponse call(HBaseRpcController controller) throws Exception {
-      // Check whether we should still replay this entry. If the regions are changed, or the
-      // entry is not coming form the primary region, filter it out because we do not need it.
-      // Regions can change because of (1) region split (2) region merge (3) table recreated
-      boolean skip = false;
-      if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(),
-          initialEncodedRegionName)) {
-        skip = true;
-      }
-      if (!this.entries.isEmpty() && !skip) {
-        Entry[] entriesArray = new Entry[this.entries.size()];
-        entriesArray = this.entries.toArray(entriesArray);
-
-        // set the region name for the target region replica
-        Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
-            ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray, location
-                .getRegionInfo().getEncodedNameAsBytes(), null, null, null);
-        controller.setCellScanner(p.getSecond());
-        return stub.replay(controller, p.getFirst());
-      }
+  @Override
+  public boolean canReplicateToSameCluster() {
+    return true;
+  }
 
-      if (skip) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Skipping " + entries.size() + " entries in table " + tableName
-            + " because located region " + location.getRegionInfo().getEncodedName()
-            + " is different than the original region "
-            + Bytes.toStringBinary(initialEncodedRegionName) + " from WALEdit");
-          for (Entry entry : entries) {
-            LOG.trace("Skipping : " + entry);
-          }
-        }
-        skippedEntries.addAndGet(entries.size());
-      }
-      return ReplicateWALEntryResponse.newBuilder().build();
-    }
+  @Override
+  protected WALEntryFilter getScopeWALEntryFilter() {
+    // we do not care about scope. We replicate everything.
+    return null;
   }
 }
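
The heart of the rewrite above is the per-replica fan-out in the private replicate(future, locs, ...)
method: one replay future per secondary replica, a countdown of outstanding calls, and only the
first recorded error being propagated. Below is a minimal, self-contained sketch of that
aggregation pattern; replayToReplica() is a hypothetical stand-in for AsyncClusterConnection.replay(),
everything else is plain JDK.

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.atomic.AtomicReference;

    public class ReplicaFanOutSketch {

      // Hypothetical stand-in for AsyncClusterConnection.replay(...): replays the given
      // entries to one secondary replica and yields the number of skipped edits.
      static CompletableFuture<Long> replayToReplica(int replicaId, List<String> entries) {
        return CompletableFuture.completedFuture(0L);
      }

      // Fan the entries out to replicas 1..regionReplication-1 and complete a single
      // future once every secondary has answered; the first failure wins.
      static CompletableFuture<Long> replicateToSecondaries(int regionReplication,
          List<String> entries) {
        if (regionReplication <= 1) {
          // no secondary replicas, nothing to replicate to
          return CompletableFuture.completedFuture(0L);
        }
        CompletableFuture<Long> result = new CompletableFuture<>();
        AtomicReference<Throwable> error = new AtomicReference<>();
        AtomicInteger remaining = new AtomicInteger(regionReplication - 1);
        AtomicLong skipped = new AtomicLong();
        for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
          replayToReplica(replicaId, entries).whenComplete((skippedEdits, e) -> {
            if (e != null) {
              error.compareAndSet(null, e); // remember only the first error
            } else {
              skipped.accumulateAndGet(skippedEdits, Math::max);
            }
            if (remaining.decrementAndGet() == 0) {
              // every secondary has answered; fail if any of them failed
              if (error.get() != null) {
                result.completeExceptionally(error.get());
              } else {
                result.complete(skipped.get());
              }
            }
          });
        }
        return result;
      }
    }

The countdown plus compareAndSet on the error reference is what lets the endpoint wait for all
secondaries while still reporting the first problem once the last response arrives.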
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 8e001e6..b58fce3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -283,7 +283,7 @@ public class ReplicationSource implements ReplicationSourceInterface {
       tableDescriptors = ((HRegionServer) server).getTableDescriptors();
     }
     replicationEndpoint
-      .init(new ReplicationEndpoint.Context(conf, replicationPeer.getConfiguration(), fs,
+      .init(new ReplicationEndpoint.Context(server, conf, replicationPeer.getConfiguration(), fs,
         replicationPeer.getId(), clusterId, replicationPeer, metrics, tableDescriptors, server));
     replicationEndpoint.start();
     replicationEndpoint.awaitRunning(waitOnEndpointSeconds, TimeUnit.SECONDS);
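
This one-line change is what feeds the endpoint rewrite above: ReplicationEndpoint.Context now
carries the owning Server, so an endpoint can pick up the region server's shared
AsyncClusterConnection in init() instead of opening its own connection in doStart(). A minimal
sketch of that wiring, assuming only the accessors used elsewhere in this patch (the class name
below is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.AsyncClusterConnection;
    import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;

    // Illustrative skeleton only; it stays abstract so the remaining
    // ReplicationEndpoint methods do not have to be spelled out here.
    public abstract class SharedConnectionEndpointSketch extends HBaseReplicationEndpoint {

      private AsyncClusterConnection connection;

      @Override
      public void init(Context context) throws IOException {
        super.init(context);
        // Reuse the region server's shared async connection rather than creating
        // a private connection inside the endpoint, as the old code did.
        this.connection = context.getServer().getAsyncClusterConnection();
      }
    }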
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
index 55da3f4..652d1d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
@@ -20,16 +20,17 @@ package org.apache.hadoop.hbase.replication.regionserver;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicLong;
+import java.util.UUID;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.CellBuilderFactory;
@@ -42,7 +43,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -51,12 +51,12 @@ import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -383,9 +383,8 @@ public class TestRegionReplicaReplicationEndpoint {
     testRegionReplicaReplicationIgnores(false, true);
   }
 
-  public void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disableReplication)
+  private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disableReplication)
       throws Exception {
-
     // tests having edits from a disabled or dropped table is handled correctly by skipping those
     // entries and further edits after the edits from dropped/disabled table can be replicated
     // without problems.
@@ -405,8 +404,7 @@ public class TestRegionReplicaReplicationEndpoint {
     HTU.getAdmin().createTable(htd);
 
     // both tables are created, now pause replication
-    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
-    admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
+    HTU.getAdmin().disableReplicationPeer(ServerRegionReplicaUtil.getReplicationPeerId());
 
     // now that the replication is disabled, write to the table to be dropped, then drop the table.
 
@@ -416,19 +414,9 @@ public class TestRegionReplicaReplicationEndpoint {
 
     HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);
 
-    AtomicLong skippedEdits = new AtomicLong();
-    RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink =
-        mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
-    when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
-    FSTableDescriptors fstd = new FSTableDescriptors(HTU.getConfiguration(),
-        FileSystem.get(HTU.getConfiguration()), HTU.getDefaultRootDirPath());
-    RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter =
-        new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink,
-            (ClusterConnection) connection, Executors.newSingleThreadExecutor(), Integer.MAX_VALUE,
-            fstd);
     RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
     HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
-    byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();
+    byte[] encodedRegionName = hrl.getRegion().getEncodedNameAsBytes();
 
     Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("A"))
         .setFamily(HTU.fam1).setValue(Bytes.toBytes("VAL")).setType(Type.Put).build();
@@ -436,7 +424,6 @@ public class TestRegionReplicaReplicationEndpoint {
       new WALKeyImpl(encodedRegionName, toBeDisabledTable, 1),
         new WALEdit()
             .add(cell));
-
     HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
     if (dropTable) {
       HTU.getAdmin().deleteTable(toBeDisabledTable);
@@ -445,11 +432,23 @@ public class TestRegionReplicaReplicationEndpoint {
       HTU.getAdmin().modifyTable(htd);
       HTU.getAdmin().enableTable(toBeDisabledTable);
     }
-    sinkWriter.append(toBeDisabledTable, encodedRegionName,
-      HConstants.EMPTY_BYTE_ARRAY, Lists.newArrayList(entry, entry));
-
-    assertEquals(2, skippedEdits.get());
 
+    HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(0);
+    MetricsSource metrics = mock(MetricsSource.class);
+    ReplicationEndpoint.Context ctx =
+      new ReplicationEndpoint.Context(rs, HTU.getConfiguration(), HTU.getConfiguration(),
+        HTU.getTestFileSystem(), ServerRegionReplicaUtil.getReplicationPeerId(),
+        UUID.fromString(rs.getClusterId()), rs.getReplicationSourceService().getReplicationPeers()
+          .getPeer(ServerRegionReplicaUtil.getReplicationPeerId()),
+        metrics, rs.getTableDescriptors(), rs);
+    RegionReplicaReplicationEndpoint rrpe = new RegionReplicaReplicationEndpoint();
+    rrpe.init(ctx);
+    rrpe.start();
+    ReplicationEndpoint.ReplicateContext repCtx = new ReplicationEndpoint.ReplicateContext();
+    repCtx.setEntries(Lists.newArrayList(entry, entry));
+    assertTrue(rrpe.replicate(repCtx));
+    verify(metrics, times(1)).incrLogEditsFiltered(eq(2L));
+    rrpe.stop();
     if (disableReplication) {
       // enable replication again so that we can verify replication
       HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
@@ -460,17 +459,14 @@ public class TestRegionReplicaReplicationEndpoint {
 
     try {
       // load some data to the to-be-dropped table
-
       // load the data to the table
       HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
 
       // now enable the replication
-      admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
+      HTU.getAdmin().enableReplicationPeer(ServerRegionReplicaUtil.getReplicationPeerId());
 
       verifyReplication(tableName, regionReplication, 0, 1000);
-
     } finally {
-      admin.close();
       table.close();
       rl.close();
       tableToBeDisabled.close();
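
Both phases of the new replicate() in RegionReplicaReplicationEndpoint wrap their work in the
RetryCounter idiom exercised by this test: retry with backoff until the counter is exhausted, and
restore the interrupt flag if the backoff sleep is interrupted. A compact sketch of that loop
under the same RetryCounterFactory settings; doAttempt() is a hypothetical placeholder for the
real work.

    import org.apache.hadoop.hbase.util.RetryCounter;
    import org.apache.hadoop.hbase.util.RetryCounterFactory;

    public class RetryLoopSketch {

      private final RetryCounterFactory retryCounterFactory =
        new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60000);

      // Hypothetical unit of work; returns true when it succeeded.
      private boolean doAttempt() {
        return true;
      }

      boolean runWithRetries() {
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
          if (doAttempt()) {
            return true;
          }
          if (!retryCounter.shouldRetry()) {
            return false; // give up after the configured number of attempts
          }
          try {
            retryCounter.sleepUntilNextRetry(); // back off before the next attempt
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupted state
            return false;
          }
        }
      }
    }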
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
index e91a8bd..0ec7d54 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
@@ -23,10 +23,12 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Optional;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -36,24 +38,22 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.WALCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.WALObserver;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext;
-import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -73,8 +73,6 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
-
 /**
  * Tests RegionReplicaReplicationEndpoint. Unlike TestRegionReplicaReplicationEndpoint this
  * class contains lower level tests using callables.
@@ -178,39 +176,34 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
   public void testReplayCallable() throws Exception {
     // tests replaying the edits to a secondary region replica using the Callable directly
     openRegion(HTU, rs0, hriSecondary);
-    ClusterConnection connection =
-        (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration());
 
-    //load some data to primary
+    // load some data to primary
     HTU.loadNumericRows(table, f, 0, 1000);
 
     Assert.assertEquals(1000, entries.size());
-    // replay the edits to the secondary using replay callable
-    replicateUsingCallable(connection, entries);
+    try (AsyncClusterConnection conn = ClusterConnectionFactory
+      .createAsyncClusterConnection(HTU.getConfiguration(), null, User.getCurrent())) {
+      // replay the edits to the secondary using replay callable
+      replicateUsingCallable(conn, entries);
+    }
 
     Region region = rs0.getRegion(hriSecondary.getEncodedName());
     HTU.verifyNumericRows(region, f, 0, 1000);
 
     HTU.deleteNumericRows(table, f, 0, 1000);
     closeRegion(HTU, rs0, hriSecondary);
-    connection.close();
   }
 
-  private void replicateUsingCallable(ClusterConnection connection, Queue<Entry> entries)
-      throws IOException, RuntimeException {
+  private void replicateUsingCallable(AsyncClusterConnection connection, Queue<Entry> entries)
+      throws IOException, ExecutionException, InterruptedException {
     Entry entry;
     while ((entry = entries.poll()) != null) {
       byte[] row = CellUtil.cloneRow(entry.getEdit().getCells().get(0));
-      RegionLocations locations = connection.locateRegion(tableName, row, true, true);
-      RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection,
-        RpcControllerFactory.instantiate(connection.getConfiguration()),
-        table.getName(), locations.getRegionLocation(1),
-        locations.getRegionLocation(1).getRegionInfo(), row, Lists.newArrayList(entry),
-        new AtomicLong());
-
-      RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(
-        connection.getConfiguration());
-      factory.<ReplicateWALEntryResponse> newCaller().callWithRetries(callable, 10000);
+      RegionLocations locations = connection.getRegionLocations(tableName, row, true).get();
+      connection
+        .replay(tableName, locations.getRegionLocation(1).getRegion().getEncodedNameAsBytes(), row,
+          Collections.singletonList(entry), 1, Integer.MAX_VALUE, TimeUnit.SECONDS.toNanos(10))
+        .get();
     }
   }
 
@@ -218,49 +211,49 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
   public void testReplayCallableWithRegionMove() throws Exception {
     // tests replaying the edits to a secondary region replica using the Callable directly while
     // the region is moved to another location.It tests handling of RME.
-    openRegion(HTU, rs0, hriSecondary);
-    ClusterConnection connection =
-        (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration());
-    //load some data to primary
-    HTU.loadNumericRows(table, f, 0, 1000);
+    try (AsyncClusterConnection conn = ClusterConnectionFactory
+      .createAsyncClusterConnection(HTU.getConfiguration(), null, User.getCurrent())) {
+      openRegion(HTU, rs0, hriSecondary);
+      // load some data to primary
+      HTU.loadNumericRows(table, f, 0, 1000);
 
-    Assert.assertEquals(1000, entries.size());
-    // replay the edits to the secondary using replay callable
-    replicateUsingCallable(connection, entries);
+      Assert.assertEquals(1000, entries.size());
 
-    Region region = rs0.getRegion(hriSecondary.getEncodedName());
-    HTU.verifyNumericRows(region, f, 0, 1000);
+      // replay the edits to the secondary using replay callable
+      replicateUsingCallable(conn, entries);
 
-    HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary
+      Region region = rs0.getRegion(hriSecondary.getEncodedName());
+      HTU.verifyNumericRows(region, f, 0, 1000);
 
-    // move the secondary region from RS0 to RS1
-    closeRegion(HTU, rs0, hriSecondary);
-    openRegion(HTU, rs1, hriSecondary);
+      HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary
 
-    // replicate the new data
-    replicateUsingCallable(connection, entries);
+      // move the secondary region from RS0 to RS1
+      closeRegion(HTU, rs0, hriSecondary);
+      openRegion(HTU, rs1, hriSecondary);
 
-    region = rs1.getRegion(hriSecondary.getEncodedName());
-    // verify the new data. old data may or may not be there
-    HTU.verifyNumericRows(region, f, 1000, 2000);
+      // replicate the new data
+      replicateUsingCallable(conn, entries);
 
-    HTU.deleteNumericRows(table, f, 0, 2000);
-    closeRegion(HTU, rs1, hriSecondary);
-    connection.close();
+      region = rs1.getRegion(hriSecondary.getEncodedName());
+      // verify the new data. old data may or may not be there
+      HTU.verifyNumericRows(region, f, 1000, 2000);
+
+      HTU.deleteNumericRows(table, f, 0, 2000);
+      closeRegion(HTU, rs1, hriSecondary);
+    }
   }
 
   @Test
   public void testRegionReplicaReplicationEndpointReplicate() throws Exception {
     // tests replaying the edits to a secondary region replica using the RRRE.replicate()
     openRegion(HTU, rs0, hriSecondary);
-    ClusterConnection connection =
-        (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration());
     RegionReplicaReplicationEndpoint replicator = new RegionReplicaReplicationEndpoint();
 
     ReplicationEndpoint.Context context = mock(ReplicationEndpoint.Context.class);
     when(context.getConfiguration()).thenReturn(HTU.getConfiguration());
     when(context.getMetrics()).thenReturn(mock(MetricsSource.class));
-
+    when(context.getServer()).thenReturn(rs0);
+    when(context.getTableDescriptors()).thenReturn(rs0.getTableDescriptors());
     replicator.init(context);
     replicator.startAsync();
 
@@ -272,12 +265,11 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
     final String fakeWalGroupId = "fakeWALGroup";
     replicator.replicate(new ReplicateContext().setEntries(Lists.newArrayList(entries))
         .setWalGroupId(fakeWalGroupId));
-
+    replicator.stop();
     Region region = rs0.getRegion(hriSecondary.getEncodedName());
     HTU.verifyNumericRows(region, f, 0, 1000);
 
     HTU.deleteNumericRows(table, f, 0, 1000);
     closeRegion(HTU, rs0, hriSecondary);
-    connection.close();
   }
 }
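
The replicateUsingCallable() rewrite above is also the simplest way to drive the new replay RPC
outside of a running endpoint: open a short-lived AsyncClusterConnection, locate the secondary
replica, and block on the returned futures. A condensed sketch of that usage, assuming the
AsyncClusterConnection API introduced on this branch (the wrapper class and method below are
illustrative):

    import java.util.Collections;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.RegionLocations;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncClusterConnection;
    import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.wal.WAL.Entry;

    public class ReplayToSecondarySketch {

      // Replays one WAL entry to replica #1 of the region containing 'row',
      // blocking on the async calls the same way the test above does.
      static void replayOne(Configuration conf, TableName tableName, byte[] row, Entry entry)
          throws Exception {
        try (AsyncClusterConnection conn = ClusterConnectionFactory
            .createAsyncClusterConnection(conf, null, User.getCurrent())) {
          RegionLocations locs = conn.getRegionLocations(tableName, row, true).get();
          conn.replay(tableName,
            locs.getRegionLocation(1).getRegion().getEncodedNameAsBytes(), row,
            Collections.singletonList(entry), 1, Integer.MAX_VALUE,
            TimeUnit.SECONDS.toNanos(10)).get();
        }
      }
    }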


[hbase] 09/27: HBASE-21778 Remove the usage of the locateRegion related methods in ClusterConnection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 68a23dd792aec100b92b4723bf8cbbb74eba0c65
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Fri Feb 1 16:40:34 2019 +0800

    HBASE-21778 Remove the usage of the locateRegion related methods in ClusterConnection
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../apache/hadoop/hbase/client/AsyncProcess.java   |   4 +-
 .../hadoop/hbase/client/BufferedMutatorImpl.java   |   3 +-
 .../hbase/client/ClientAsyncPrefetchScanner.java   |   4 +-
 .../apache/hadoop/hbase/client/ClientScanner.java  |   6 +-
 .../hadoop/hbase/client/ClientServiceCallable.java |   4 +-
 .../hadoop/hbase/client/ClientSimpleScanner.java   |   4 +-
 .../hadoop/hbase/client/ClusterConnection.java     | 149 ---------------------
 .../hbase/client/ConnectionImplementation.java     | 127 ++++++++++++------
 .../hadoop/hbase/client/FlushRegionCallable.java   |   6 +-
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |   8 +-
 .../org/apache/hadoop/hbase/client/HTable.java     |   2 +-
 .../hbase/client/RegionAdminServiceCallable.java   |  12 +-
 .../hadoop/hbase/client/RegionServerCallable.java  |  10 +-
 .../hadoop/hbase/client/ReversedClientScanner.java |   4 +-
 .../client/RpcRetryingCallerWithReadReplicas.java  |  26 ++--
 .../hbase/client/ScannerCallableWithReplicas.java  |  27 ++--
 .../hadoop/hbase/client/TestAsyncProcess.java      |  98 +++++++-------
 .../TestAsyncProcessWithRegionException.java       |  10 +-
 .../hadoop/hbase/client/TestBufferedMutator.java   |   2 +-
 .../hadoop/hbase/client/TestClientScanner.java     |   8 +-
 .../hbase/client/TestReversedScannerCallable.java  |   2 +-
 ...nTestTimeBoundedRequestsWithRegionReplicas.java |  13 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |  34 ++---
 .../apache/hadoop/hbase/util/RegionSplitter.java   |  19 ++-
 .../hbase/client/HConnectionTestingUtility.java    |  89 ++++++------
 .../apache/hadoop/hbase/client/TestCISleep.java    |   4 +-
 .../hbase/client/TestHBaseAdminNoCluster.java      |   7 +-
 .../TestMetaTableAccessorNoCluster.java            |  47 ++++---
 .../hbase/client/TestReplicaWithCluster.java       |  41 +++---
 .../hadoop/hbase/client/TestReplicasClient.java    |   7 +-
 .../hbase/client/TestSeparateClientZKCluster.java  |   2 +-
 .../master/TestMetaAssignmentWithStopMaster.java   |  48 +++----
 .../TestLoadIncrementalHFilesSplitRecovery.java    |  27 ++--
 .../hadoop/hbase/util/BaseTestHBaseFsck.java       |   3 +-
 .../hadoop/hbase/util/MultiThreadedAction.java     |  23 ++--
 .../hadoop/hbase/util/MultiThreadedReader.java     |  11 +-
 .../hadoop/hbase/util/MultiThreadedWriterBase.java |   8 +-
 37 files changed, 391 insertions(+), 508 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index de7449b..b0f863f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -136,7 +136,7 @@ class AsyncProcess {
   // TODO: many of the fields should be made private
   final long id;
 
-  final ClusterConnection connection;
+  final ConnectionImplementation connection;
   private final RpcRetryingCallerFactory rpcCallerFactory;
   final RpcControllerFactory rpcFactory;
 
@@ -161,7 +161,7 @@ class AsyncProcess {
   public static final String LOG_DETAILS_PERIOD = "hbase.client.log.detail.period.ms";
   private static final int DEFAULT_LOG_DETAILS_PERIOD = 10000;
   private final int periodToLog;
-  AsyncProcess(ClusterConnection hc, Configuration conf,
+  AsyncProcess(ConnectionImplementation hc, Configuration conf,
       RpcRetryingCallerFactory rpcCaller, RpcControllerFactory rpcFactory) {
     if (hc == null) {
       throw new IllegalArgumentException("ClusterConnection cannot be null.");
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index f0c8da4..922611b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -140,7 +140,8 @@ public class BufferedMutatorImpl implements BufferedMutator {
             params.getOperationTimeout() : conn.getConnectionConfiguration().getOperationTimeout());
     this.ap = ap;
   }
-  BufferedMutatorImpl(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory,
+
+  BufferedMutatorImpl(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory,
       RpcControllerFactory rpcFactory, BufferedMutatorParams params) {
     this(conn, params,
       // puts need to track errors globally due to how the APIs currently work.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
index 0efc2ac..76d7409 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
@@ -66,11 +66,11 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
   private final Condition notFull = lock.newCondition();
 
   public ClientAsyncPrefetchScanner(Configuration configuration, Scan scan, TableName name,
-      ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory,
       RpcControllerFactory rpcControllerFactory, ExecutorService pool,
       int replicaCallTimeoutMicroSecondScan) throws IOException {
     super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool,
-        replicaCallTimeoutMicroSecondScan);
+      replicaCallTimeoutMicroSecondScan);
   }
 
   @VisibleForTesting
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 0342537..fb89925 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -69,7 +69,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
   // Keep lastResult returned successfully in case we have to reset scanner.
   protected Result lastResult = null;
   protected final long maxScannerResultSize;
-  private final ClusterConnection connection;
+  private final ConnectionImplementation connection;
   protected final TableName tableName;
   protected final int scannerTimeout;
   protected RpcRetryingCaller<Result[]> caller;
@@ -94,7 +94,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
    * @throws IOException
    */
   public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
-      ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory,
       RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
       throws IOException {
     if (LOG.isTraceEnabled()) {
@@ -138,7 +138,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
     initCache();
   }
 
-  protected ClusterConnection getConnection() {
+  protected ConnectionImplementation getConnection() {
     return this.connection;
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
index f118e7a..67ba838 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
@@ -34,9 +34,9 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 public abstract class ClientServiceCallable<T> extends
     RegionServerCallable<T, ClientProtos.ClientService.BlockingInterface> {
 
-  public ClientServiceCallable(Connection connection, TableName tableName, byte [] row,
+  public ClientServiceCallable(Connection connection, TableName tableName, byte[] row,
       RpcController rpcController, int priority) {
-    super(connection, tableName, row, rpcController, priority);
+    super((ConnectionImplementation) connection, tableName, row, rpcController, priority);
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
index 7e9c4b9..e5d7b97 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
@@ -37,11 +37,11 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 @InterfaceAudience.Private
 public class ClientSimpleScanner extends ClientScanner {
   public ClientSimpleScanner(Configuration configuration, Scan scan, TableName name,
-      ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory,
       RpcControllerFactory rpcControllerFactory, ExecutorService pool,
       int replicaCallTimeoutMicroSecondScan) throws IOException {
     super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool,
-        replicaCallTimeoutMicroSecondScan);
+      replicaCallTimeoutMicroSecondScan);
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 304f832..092bd24 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -18,11 +18,8 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.util.List;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@@ -93,135 +90,6 @@ public interface ClusterConnection extends Connection {
   TableState getTableState(TableName tableName)  throws IOException;
 
   /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @return HRegionLocation that describes where to find the region in
-   *   question
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation locateRegion(final TableName tableName,
-      final byte [] row) throws IOException;
-
-  /**
-   * @deprecated {@link #clearRegionLocationCache()} instead.
-   */
-  @Deprecated
-  default void clearRegionCache() {
-    clearRegionLocationCache();
-  }
-
-  void cacheLocation(final TableName tableName, final RegionLocations location);
-
-  /**
-   * Allows flushing the region cache of all locations that pertain to
-   * <code>tableName</code>
-   * @param tableName Name of the table whose regions we are to remove from
-   *   cache.
-   */
-  void clearRegionCache(final TableName tableName);
-
-  /**
-   * Deletes cached locations for the specific region.
-   * @param location The location object for the region, to be purged from cache.
-   */
-  void deleteCachedRegionLocation(final HRegionLocation location);
-
-  /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in, ignoring any value that might be in the cache.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @return HRegionLocation that describes where to find the region in
-   *   question
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation relocateRegion(final TableName tableName,
-      final byte [] row) throws IOException;
-
-  /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in, ignoring any value that might be in the cache.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @param replicaId the replicaId of the region
-   * @return RegionLocations that describe where to find the region in
-   *   question
-   * @throws IOException if a remote or network exception occurs
-   */
-  RegionLocations relocateRegion(final TableName tableName,
-      final byte [] row, int replicaId) throws IOException;
-
-  /**
-   * Update the location cache. This is used internally by HBase, in most cases it should not be
-   *  used by the client application.
-   * @param tableName the table name
-   * @param regionName the region name
-   * @param rowkey the row
-   * @param exception the exception if any. Can be null.
-   * @param source the previous location
-   */
-  void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey,
-                                    Object exception, ServerName source);
-
-  /**
-   * Gets the location of the region of <i>regionName</i>.
-   * @param regionName name of the region to locate
-   * @return HRegionLocation that describes where to find the region in
-   *   question
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation locateRegion(final byte[] regionName)
-  throws IOException;
-
-  /**
-   * Gets the locations of all regions in the specified table, <i>tableName</i>.
-   * @param tableName table to get regions of
-   * @return list of region locations for all regions of table
-   * @throws IOException if IO failure occurs
-   */
-  List<HRegionLocation> locateRegions(final TableName tableName) throws IOException;
-
-  /**
-   * Gets the locations of all regions in the specified table, <i>tableName</i>.
-   * @param tableName table to get regions of
-   * @param useCache Should we use the cache to retrieve the region information.
-   * @param offlined True if we are to include offlined regions, false and we'll leave out offlined
-   *          regions from returned list.
-   * @return list of region locations for all regions of table
-   * @throws IOException if IO failure occurs
-   */
-  List<HRegionLocation> locateRegions(final TableName tableName,
-      final boolean useCache,
-      final boolean offlined) throws IOException;
-
-  /**
-   *
-   * @param tableName table to get regions of
-   * @param row the row
-   * @param useCache Should we use the cache to retrieve the region information.
-   * @param retry do we retry
-   * @return region locations for this row.
-   * @throws IOException if IO failure occurs
-   */
-  RegionLocations locateRegion(TableName tableName,
-                               byte[] row, boolean useCache, boolean retry) throws IOException;
-
- /**
-  *
-  * @param tableName table to get regions of
-  * @param row the row
-  * @param useCache Should we use the cache to retrieve the region information.
-  * @param retry do we retry
-  * @param replicaId the replicaId for the region
-  * @return region locations for this row.
-  * @throws IOException if IO failure occurs
-  */
-  RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry,
-     int replicaId) throws IOException;
-
-  /**
    * Returns a {@link MasterKeepAliveConnection} to the active master
    */
   MasterKeepAliveConnection getMaster() throws IOException;
@@ -251,23 +119,6 @@ public interface ClusterConnection extends Connection {
   ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException;
 
   /**
-   * Find region location hosting passed row
-   * @param tableName table name
-   * @param row Row to find.
-   * @param reload If true do not use cache, otherwise bypass.
-   * @return Location of row.
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reload)
-      throws IOException;
-
-  /**
-   * Clear any caches that pertain to server name <code>sn</code>.
-   * @param sn A server name
-   */
-  void clearCaches(final ServerName sn);
-
-  /**
    * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration.
    */
   NonceGenerator getNonceGenerator();
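
The region-locating and cache-clearing methods dropped from ClusterConnection above have public counterparts on RegionLocator. A minimal sketch of those counterparts, assuming the usual org.apache.hadoop.hbase.client imports and an illustrative table name (this is not the exact migration the patch performs in each caller):

  static void locateExamples(Connection conn) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("test_table"))) {
      // Counterpart of the removed locateRegion(tableName, row); may be served from the cache.
      HRegionLocation cached = locator.getRegionLocation(Bytes.toBytes("row1"));
      // Counterpart of the removed relocateRegion(tableName, row); reload = true skips the cache.
      HRegionLocation fresh = locator.getRegionLocation(Bytes.toBytes("row1"), true);
      // Counterpart of the removed locateRegions(tableName).
      List<HRegionLocation> all = locator.getAllRegionLocations();
    }
  }
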
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index e63c16d..49fa81b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -615,9 +615,16 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return true;
   }
 
-  @Override
-  public HRegionLocation getRegionLocation(final TableName tableName, final byte[] row,
-      boolean reload) throws IOException {
+  /**
+   * Find region location hosting passed row
+   * @param tableName table name
+   * @param row Row to find.
+   * @param reload If true, bypass the cache and look the location up again; otherwise the cache may be used.
+   * @return Location of row.
+   * @throws IOException if a remote or network exception occurs
+   */
+  HRegionLocation getRegionLocation(final TableName tableName, final byte[] row, boolean reload)
+      throws IOException {
     return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row);
   }
 
@@ -687,13 +694,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }
   }
 
-  @Override
-  public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
-    RegionLocations locations = locateRegion(RegionInfo.getTable(regionName),
-      RegionInfo.getStartKey(regionName), false, true);
-    return locations == null ? null : locations.getRegionLocation();
-  }
-
   private boolean isDeadServer(ServerName sn) {
     if (clusterStatusListener == null) {
       return false;
@@ -702,13 +702,26 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }
   }
 
-  @Override
-  public List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
+  /**
+   * Gets the locations of all regions in the specified table, <i>tableName</i>.
+   * @param tableName table to get regions of
+   * @return list of region locations for all regions of table
+   * @throws IOException if IO failure occurs
+   */
+  List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
     return locateRegions(tableName, false, true);
   }
 
-  @Override
-  public List<HRegionLocation> locateRegions(TableName tableName, boolean useCache,
+  /**
+   * Gets the locations of all regions in the specified table, <i>tableName</i>.
+   * @param tableName table to get regions of
+   * @param useCache Should we use the cache to retrieve the region information.
+   * @param offlined True to include offlined regions in the returned list, false to leave
+   *          offlined regions out.
+   * @return list of region locations for all regions of table
+   * @throws IOException if IO failure occurs
+   */
+  List<HRegionLocation> locateRegions(TableName tableName, boolean useCache,
       boolean offlined) throws IOException {
     List<RegionInfo> regions;
     if (TableName.isMetaTableName(tableName)) {
@@ -733,24 +746,44 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return locations;
   }
 
-  @Override
-  public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
-      throws IOException {
+  /**
+   * Find the location of the region of <i>tableName</i> that <i>row</i> lives in.
+   * @param tableName name of the table <i>row</i> is in
+   * @param row row key you're trying to find the region of
+   * @return HRegionLocation that describes where to find the region in question
+   * @throws IOException if a remote or network exception occurs
+   */
+  HRegionLocation locateRegion(final TableName tableName, final byte[] row) throws IOException {
     RegionLocations locations = locateRegion(tableName, row, true, true);
     return locations == null ? null : locations.getRegionLocation();
   }
 
-  @Override
-  public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
-      throws IOException {
+  /**
+   * Find the location of the region of <i>tableName</i> that <i>row</i> lives in, ignoring any
+   * value that might be in the cache.
+   * @param tableName name of the table <i>row</i> is in
+   * @param row row key you're trying to find the region of
+   * @return HRegionLocation that describes where to find the region in question
+   * @throws IOException if a remote or network exception occurs
+   */
+  HRegionLocation relocateRegion(final TableName tableName, final byte[] row) throws IOException {
     RegionLocations locations =
       relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
     return locations == null ? null
       : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
   }
 
-  @Override
-  public RegionLocations relocateRegion(final TableName tableName,
+  /**
+   * Find the location of the region of <i>tableName</i> that <i>row</i>
+   * lives in, ignoring any value that might be in the cache.
+   * @param tableName name of the table <i>row</i> is in
+   * @param row row key you're trying to find the region of
+   * @param replicaId the replicaId of the region
+   * @return RegionLocations that describe where to find the region in
+   *   question
+   * @throws IOException if a remote or network exception occurs
+   */
+  RegionLocations relocateRegion(final TableName tableName,
       final byte [] row, int replicaId) throws IOException{
     // Since this is an explicit request not to use any caching, finding
     // disabled tables should not be desirable.  This will ensure that an exception is thrown when
@@ -762,14 +795,30 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return locateRegion(tableName, row, false, true, replicaId);
   }
 
-  @Override
-  public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
+  /**
+   * @param tableName table to get regions of
+   * @param row the row
+   * @param useCache Should we use the cache to retrieve the region information.
+   * @param retry do we retry
+   * @return region locations for this row.
+   * @throws IOException if IO failure occurs
+   */
+  RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
       boolean retry) throws IOException {
     return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
   }
 
-  @Override
-  public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
+  /**
+   * Find the locations of the region of <i>tableName</i> that <i>row</i> lives in.
+   * @param tableName table to get regions of
+   * @param row the row
+   * @param useCache Should we use the cache to retrieve the region information.
+   * @param retry do we retry
+   * @param replicaId the replicaId for the region
+   * @return region locations for this row.
+   * @throws IOException if IO failure occurs
+   */
+  RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
       boolean retry, int replicaId) throws IOException {
     checkClosed();
     if (tableName == null || tableName.getName().length == 0) {
@@ -973,8 +1022,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    * @param tableName The table name.
    * @param location the new location
    */
-  @Override
-  public void cacheLocation(final TableName tableName, final RegionLocations location) {
+  void cacheLocation(final TableName tableName, final RegionLocations location) {
     metaCache.cacheLocation(tableName, location);
   }
 
@@ -988,15 +1036,15 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     return metaCache.getCachedLocation(tableName, row);
   }
 
-  public void clearRegionCache(final TableName tableName, byte[] row) {
+  void clearRegionCache(final TableName tableName, byte[] row) {
     metaCache.clearCache(tableName, row);
   }
 
-  /*
-   * Delete all cached entries of a table that maps to a specific location.
+  /**
+   * Clear any caches that pertain to server name <code>serverName</code>.
+   * @param serverName A server name
    */
-  @Override
-  public void clearCaches(final ServerName serverName) {
+  void clearCaches(final ServerName serverName) {
     metaCache.clearCache(serverName);
   }
 
@@ -1005,8 +1053,11 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     metaCache.clearCache();
   }
 
-  @Override
-  public void clearRegionCache(final TableName tableName) {
+  /**
+   * Allows flushing the region cache of all locations that pertain to <code>tableName</code>.
+   * @param tableName Name of the table whose regions we are to remove from cache.
+   */
+  void clearRegionCache(final TableName tableName) {
     metaCache.clearCache(tableName);
   }
 
@@ -1876,8 +1927,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     cacheLocation(hri.getTable(), source, newHrl);
   }
 
-  @Override
-  public void deleteCachedRegionLocation(final HRegionLocation location) {
+  void deleteCachedRegionLocation(final HRegionLocation location) {
     metaCache.clearCache(location);
   }
 
@@ -1889,8 +1939,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    *   or wrapped or both RegionMovedException
    * @param source server that is the source of the location update.
    */
-  @Override
-  public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey,
+  void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey,
     final Object exception, final ServerName source) {
     if (rowkey == null || tableName == null) {
       LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) +
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java
index bb265a4..d881fe0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
@@ -28,6 +27,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
@@ -42,7 +42,7 @@ public class FlushRegionCallable extends RegionAdminServiceCallable<FlushRegionR
   private final boolean writeFlushWalMarker;
   private boolean reload;
 
-  public FlushRegionCallable(ClusterConnection connection,
+  public FlushRegionCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] regionName,
       byte[] regionStartKey, boolean writeFlushWalMarker) {
     super(connection, rpcControllerFactory, tableName, regionStartKey);
@@ -50,7 +50,7 @@ public class FlushRegionCallable extends RegionAdminServiceCallable<FlushRegionR
     this.writeFlushWalMarker = writeFlushWalMarker;
   }
 
-  public FlushRegionCallable(ClusterConnection connection,
+  public FlushRegionCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, RegionInfo regionInfo,
       boolean writeFlushWalMarker) {
     this(connection, rpcControllerFactory, regionInfo.getTable(), regionInfo.getRegionName(),
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 5812bd8..f553960 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -245,7 +245,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
 public class HBaseAdmin implements Admin {
   private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
 
-  private ClusterConnection connection;
+  private ConnectionImplementation connection;
 
   private final Configuration conf;
   private final long pause;
@@ -271,7 +271,7 @@ public class HBaseAdmin implements Admin {
     return syncWaitTimeout;
   }
 
-  HBaseAdmin(ClusterConnection connection) throws IOException {
+  HBaseAdmin(ConnectionImplementation connection) throws IOException {
     this.conf = connection.getConfiguration();
     this.connection = connection;
 
@@ -644,7 +644,9 @@ public class HBaseAdmin implements Admin {
     protected Void postOperationResult(final Void result, final long deadlineTs)
         throws IOException, TimeoutException {
       // Delete cached information to prevent clients from using old locations
-      ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName());
+      try (RegionLocator locator = getAdmin().getConnection().getRegionLocator(getTableName())) {
+        locator.clearRegionLocationCache();
+      }
       return super.postOperationResult(result, deadlineTs);
     }
   }
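
The postOperationResult hunk above switches from the ClusterConnection cast to the public RegionLocator route for invalidating cached locations. A minimal sketch of the same pattern in isolation, assuming an Admin handle and an illustrative table name:

  static void dropCachedLocations(Admin admin) throws IOException {
    TableName tn = TableName.valueOf("test_table");
    try (RegionLocator locator = admin.getConnection().getRegionLocator(tn)) {
      // Discard cached locations so subsequent calls re-resolve region placement.
      locator.clearRegionLocationCache();
    }
  }
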
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index bef7a51..4300f6e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -102,7 +102,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType
 public class HTable implements Table {
   private static final Logger LOG = LoggerFactory.getLogger(HTable.class);
   private static final Consistency DEFAULT_CONSISTENCY = Consistency.STRONG;
-  private final ClusterConnection connection;
+  private final ConnectionImplementation connection;
   private final TableName tableName;
   private final Configuration configuration;
   private final ConnectionConfiguration connConfiguration;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index ece91bd..33dfefa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -46,25 +46,25 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<
   protected final RpcControllerFactory rpcControllerFactory;
   private HBaseRpcController controller = null;
 
-  protected final ClusterConnection connection;
+  protected final ConnectionImplementation connection;
   protected HRegionLocation location;
   protected final TableName tableName;
   protected final byte[] row;
   protected final int replicaId;
 
-  public RegionAdminServiceCallable(ClusterConnection connection,
+  public RegionAdminServiceCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] row) {
     this(connection, rpcControllerFactory, null, tableName, row);
   }
 
-  public RegionAdminServiceCallable(ClusterConnection connection,
+  public RegionAdminServiceCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, HRegionLocation location,
       TableName tableName, byte[] row) {
     this(connection, rpcControllerFactory, location,
       tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
   }
 
-  public RegionAdminServiceCallable(ClusterConnection connection,
+  public RegionAdminServiceCallable(ConnectionImplementation connection,
       RpcControllerFactory rpcControllerFactory, HRegionLocation location,
       TableName tableName, byte[] row, int replicaId) {
     this.connection = connection;
@@ -138,8 +138,8 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<
     return ConnectionUtils.getPauseTime(pause, tries);
   }
 
-  public static RegionLocations getRegionLocations(
-      ClusterConnection connection, TableName tableName, byte[] row,
+  private static RegionLocations getRegionLocations(
+      ConnectionImplementation connection, TableName tableName, byte[] row,
       boolean useCache, int replicaId)
       throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
     RegionLocations rl;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index f709c44..264304e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 // Public but should be package private only it is used by MetaTableAccessor. FIX!!
 @InterfaceAudience.Private
 public abstract class RegionServerCallable<T, S> implements RetryingCallable<T> {
-  private final Connection connection;
+  private final ConnectionImplementation connection;
   private final TableName tableName;
   private final byte[] row;
   /**
@@ -75,12 +75,12 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
    * @param tableName Table name to which <code>row</code> belongs.
    * @param row The row we want in <code>tableName</code>.
    */
-  public RegionServerCallable(Connection connection, TableName tableName, byte [] row,
+  public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row,
       RpcController rpcController) {
     this(connection, tableName, row, rpcController, HConstants.NORMAL_QOS);
   }
 
-  public RegionServerCallable(Connection connection, TableName tableName, byte [] row,
+  public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row,
       RpcController rpcController, int priority) {
     super();
     this.connection = connection;
@@ -162,8 +162,8 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
   /**
    * @return {@link ClusterConnection} instance used by this Callable.
    */
-  protected ClusterConnection getConnection() {
-    return (ClusterConnection) this.connection;
+  protected ConnectionImplementation getConnection() {
+    return this.connection;
   }
 
   protected HRegionLocation getLocation() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
index 53b9641..34c24c0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
@@ -46,11 +46,11 @@ public class ReversedClientScanner extends ClientScanner {
    * @throws IOException
    */
   public ReversedClientScanner(Configuration conf, Scan scan, TableName tableName,
-      ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
+      ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory,
       RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
       throws IOException {
     super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool,
-        primaryOperationTimeout);
+      primaryOperationTimeout);
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index c82fcb8..56985c0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.client;
 
-
 import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET;
 
 import java.io.IOException;
@@ -29,7 +28,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -60,7 +58,7 @@ public class RpcRetryingCallerWithReadReplicas {
       LoggerFactory.getLogger(RpcRetryingCallerWithReadReplicas.class);
 
   protected final ExecutorService pool;
-  protected final ClusterConnection cConnection;
+  protected final ConnectionImplementation cConnection;
   protected final Configuration conf;
   protected final Get get;
   protected final TableName tableName;
@@ -73,7 +71,7 @@ public class RpcRetryingCallerWithReadReplicas {
 
   public RpcRetryingCallerWithReadReplicas(
       RpcControllerFactory rpcControllerFactory, TableName tableName,
-      ClusterConnection cConnection, final Get get,
+      ConnectionImplementation cConnection, final Get get,
       ExecutorService pool, int retries, int operationTimeout, int rpcTimeout,
       int timeBeforeReplicas) {
     this.rpcControllerFactory = rpcControllerFactory;
@@ -187,19 +185,14 @@ public class RpcRetryingCallerWithReadReplicas {
       } else {
         // We cannot get the primary replica location, it is possible that the region
         // server hosting meta is down, it needs to proceed to try cached replicas.
-        if (cConnection instanceof ConnectionImplementation) {
-          rl = ((ConnectionImplementation)cConnection).getCachedLocation(tableName, get.getRow());
-          if (rl == null) {
-            // No cached locations
-            throw e;
-          }
-
-          // Primary replica location is not known, skip primary replica
-          skipPrimary = true;
-        } else {
-          // For completeness
+        rl = cConnection.getCachedLocation(tableName, get.getRow());
+        if (rl == null) {
+          // No cached locations
           throw e;
         }
+
+        // Primary replica location is not known, skip primary replica
+        skipPrimary = true;
       }
     }
 
@@ -318,9 +311,8 @@ public class RpcRetryingCallerWithReadReplicas {
   }
 
   static RegionLocations getRegionLocations(boolean useCache, int replicaId,
-                 ClusterConnection cConnection, TableName tableName, byte[] row)
+      ConnectionImplementation cConnection, TableName tableName, byte[] row)
       throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
-
     RegionLocations rl;
     try {
       if (useCache) {
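
RpcRetryingCallerWithReadReplicas backs timeline-consistent gets, falling back to cached replica locations when the primary cannot be located, as the hunk above shows. A minimal sketch of the client-facing usage it serves, assuming a Table handle and an illustrative row key:

  Get get = new Get(Bytes.toBytes("row1"));
  get.setConsistency(Consistency.TIMELINE); // allow secondary replicas to answer the read
  Result result = table.get(get);
  boolean servedByReplica = result.isStale(); // true when a secondary replica responded
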
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
index bcb81f7..27e5f87 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hbase.client;
 
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.HashSet;
@@ -31,17 +29,18 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
-import org.apache.hadoop.hbase.util.Pair;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
 /**
  * This class has the logic for handling scanners for regions with and without replicas.
@@ -60,7 +59,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
   private static final Logger LOG = LoggerFactory.getLogger(ScannerCallableWithReplicas.class);
   volatile ScannerCallable currentScannerCallable;
   AtomicBoolean replicaSwitched = new AtomicBoolean(false);
-  final ClusterConnection cConnection;
+  final ConnectionImplementation cConnection;
   protected final ExecutorService pool;
   protected final int timeBeforeReplicas;
   private final Scan scan;
@@ -74,7 +73,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
   private boolean someRPCcancelled = false; //required for testing purposes only
   private int regionReplication = 0;
 
-  public ScannerCallableWithReplicas(TableName tableName, ClusterConnection cConnection,
+  public ScannerCallableWithReplicas(TableName tableName, ConnectionImplementation cConnection,
       ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan,
       int retries, int scannerTimeout, int caching, Configuration conf,
       RpcRetryingCaller<Result []> caller) {
@@ -151,19 +150,13 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
       RegionLocations rl = null;
       try {
         rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(true,
-            RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName,
-            currentScannerCallable.getRow());
+          RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName,
+          currentScannerCallable.getRow());
       } catch (RetriesExhaustedException | DoNotRetryIOException e) {
         // We cannot get the primary replica region location, it is possible that the region server
         // hosting meta table is down, it needs to proceed to try cached replicas directly.
-        if (cConnection instanceof ConnectionImplementation) {
-          rl = ((ConnectionImplementation) cConnection)
-              .getCachedLocation(tableName, currentScannerCallable.getRow());
-          if (rl == null) {
-            throw e;
-          }
-        } else {
-          // For completeness
+        rl = cConnection.getCachedLocation(tableName, currentScannerCallable.getRow());
+        if (rl == null) {
           throw e;
         }
       }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 81dcc46..bd6f03c 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -175,17 +175,17 @@ public class TestAsyncProcess {
       return r;
     }
 
-    public MyAsyncProcess(ClusterConnection hc, Configuration conf) {
-      super(hc, conf,
-          new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
+    public MyAsyncProcess(ConnectionImplementation hc, Configuration conf) {
+      super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
       service = Executors.newFixedThreadPool(5);
       this.conf = conf;
     }
 
-    public MyAsyncProcess(ClusterConnection hc, Configuration conf, AtomicInteger nbThreads) {
+    public MyAsyncProcess(ConnectionImplementation hc, Configuration conf,
+        AtomicInteger nbThreads) {
       super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
-      service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
-          new SynchronousQueue<>(), new CountingThreadFactory(nbThreads));
+      service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, new SynchronousQueue<>(),
+        new CountingThreadFactory(nbThreads));
     }
 
     public <CResult> AsyncRequestFuture submit(ExecutorService pool, TableName tableName,
@@ -326,7 +326,8 @@ public class TestAsyncProcess {
 
     private final IOException ioe;
 
-    public AsyncProcessWithFailure(ClusterConnection hc, Configuration conf, IOException ioe) {
+    public AsyncProcessWithFailure(ConnectionImplementation hc, Configuration conf,
+        IOException ioe) {
       super(hc, conf);
       this.ioe = ioe;
       serverTrackerTimeout = 1L;
@@ -376,7 +377,7 @@ public class TestAsyncProcess {
       customPrimarySleepMs.put(server, primaryMs);
     }
 
-    public MyAsyncProcessWithReplicas(ClusterConnection hc, Configuration conf) {
+    public MyAsyncProcessWithReplicas(ConnectionImplementation hc, Configuration conf) {
       super(hc, conf);
     }
 
@@ -622,7 +623,7 @@ public class TestAsyncProcess {
   }
 
   private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
     final long defaultHeapSizePerRequest = conn.getConfiguration().getLong(
@@ -718,7 +719,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmit() throws Exception {
-    ClusterConnection hc = createHConnection();
+    ConnectionImplementation hc = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);
 
     List<Put> puts = new ArrayList<>(1);
@@ -730,7 +731,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitWithCB() throws Exception {
-    ClusterConnection hc = createHConnection();
+    ConnectionImplementation hc = createConnectionImpl();
     final AtomicInteger updateCalled = new AtomicInteger(0);
     Batch.Callback<Object> cb = new Batch.Callback<Object>() {
       @Override
@@ -751,7 +752,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitBusyRegion() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
     conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
@@ -779,7 +780,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitBusyRegionServer() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
@@ -810,7 +811,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFail() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF);
+    MyAsyncProcess ap = new MyAsyncProcess(createConnectionImpl(), CONF);
 
     List<Put> puts = new ArrayList<>(1);
     Put p = createPut(1, false);
@@ -836,7 +837,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitTrue() throws IOException {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
@@ -885,7 +886,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFailAndSuccess() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF);
+    MyAsyncProcess ap = new MyAsyncProcess(createConnectionImpl(), CONF);
 
     List<Put> puts = new ArrayList<>(3);
     puts.add(createPut(1, false));
@@ -912,7 +913,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFlush() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF);
+    MyAsyncProcess ap = new MyAsyncProcess(createConnectionImpl(), CONF);
 
     List<Put> puts = new ArrayList<>(3);
     puts.add(createPut(1, false));
@@ -929,7 +930,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testTaskCountWithoutClientBackoffPolicy() throws IOException, InterruptedException {
-    ClusterConnection hc = createHConnection();
+    ConnectionImplementation hc = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);
     testTaskCount(ap);
   }
@@ -939,7 +940,7 @@ public class TestAsyncProcess {
     Configuration copyConf = new Configuration(CONF);
     copyConf.setBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, true);
     MyClientBackoffPolicy bp = new MyClientBackoffPolicy();
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     Mockito.when(conn.getConfiguration()).thenReturn(copyConf);
     Mockito.when(conn.getStatisticsTracker()).thenReturn(ServerStatisticTracker.create(copyConf));
     Mockito.when(conn.getBackoffPolicy()).thenReturn(bp);
@@ -979,7 +980,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testMaxTask() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     final String defaultClazz =
         conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
     conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
@@ -1038,8 +1039,8 @@ public class TestAsyncProcess {
     }
   }
 
-  private ClusterConnection createHConnection() throws IOException {
-    ClusterConnection hc = createHConnectionCommon();
+  private ConnectionImplementation createConnectionImpl() throws IOException {
+    ConnectionImplementation hc = createConnectionImplCommon();
     setMockLocation(hc, DUMMY_BYTES_1, new RegionLocations(loc1));
     setMockLocation(hc, DUMMY_BYTES_2, new RegionLocations(loc2));
     setMockLocation(hc, DUMMY_BYTES_3, new RegionLocations(loc3));
@@ -1049,8 +1050,8 @@ public class TestAsyncProcess {
     return hc;
   }
 
-  private ClusterConnection createHConnectionWithReplicas() throws IOException {
-    ClusterConnection hc = createHConnectionCommon();
+  private ConnectionImplementation createConnectionImplWithReplicas() throws IOException {
+    ConnectionImplementation hc = createConnectionImplCommon();
     setMockLocation(hc, DUMMY_BYTES_1, hrls1);
     setMockLocation(hc, DUMMY_BYTES_2, hrls2);
     setMockLocation(hc, DUMMY_BYTES_3, hrls3);
@@ -1069,16 +1070,16 @@ public class TestAsyncProcess {
     return hc;
   }
 
-  private static void setMockLocation(ClusterConnection hc, byte[] row,
+  private static void setMockLocation(ConnectionImplementation hc, byte[] row,
       RegionLocations result) throws IOException {
-    Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row),
-        Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result);
-    Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row),
-        Mockito.anyBoolean(), Mockito.anyBoolean())).thenReturn(result);
+    Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(),
+      Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result);
+    Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(),
+      Mockito.anyBoolean())).thenReturn(result);
   }
 
-  private ClusterConnection createHConnectionCommon() {
-    ClusterConnection hc = Mockito.mock(ClusterConnection.class);
+  private ConnectionImplementation createConnectionImplCommon() {
+    ConnectionImplementation hc = Mockito.mock(ConnectionImplementation.class);
     NonceGenerator ng = Mockito.mock(NonceGenerator.class);
     Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE);
     Mockito.when(hc.getNonceGenerator()).thenReturn(ng);
@@ -1089,7 +1090,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testHTablePutSuccess() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     BufferedMutatorImpl ht = new BufferedMutatorImpl(conn, bufferParam, ap);
@@ -1106,7 +1107,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testSettingWriteBufferPeriodicFlushParameters() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
 
     checkPeriodicFlushParameters(conn, ap,
@@ -1152,7 +1153,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testWriteBufferPeriodicFlushTimeoutMs() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
 
@@ -1219,7 +1220,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testBufferedMutatorImplWithSharedPool() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     BufferedMutator ht = new BufferedMutatorImpl(conn, bufferParam, ap);
@@ -1230,7 +1231,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFailedPutAndNewPut() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE)
             .writeBufferSize(0);
@@ -1275,7 +1276,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testBatch() throws IOException, InterruptedException {
-    ClusterConnection conn = new MyConnectionImpl(CONF);
+    ConnectionImplementation conn = new MyConnectionImpl(CONF);
     HTable ht = (HTable) conn.getTable(DUMMY_TABLE);
     ht.multiAp = new MyAsyncProcess(conn, CONF);
 
@@ -1306,7 +1307,7 @@ public class TestAsyncProcess {
   @Test
   public void testErrorsServers() throws IOException {
     Configuration configuration = new Configuration(CONF);
-    ClusterConnection conn = new MyConnectionImpl(configuration);
+    ConnectionImplementation conn = new MyConnectionImpl(configuration);
     MyAsyncProcess ap = new MyAsyncProcess(conn, configuration);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
@@ -1337,7 +1338,7 @@ public class TestAsyncProcess {
     Configuration copyConf = new Configuration(CONF);
     copyConf.setLong(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, readTimeout);
     copyConf.setLong(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, writeTimeout);
-    ClusterConnection conn = new MyConnectionImpl(copyConf);
+    ConnectionImplementation conn = new MyConnectionImpl(copyConf);
     MyAsyncProcess ap = new MyAsyncProcess(conn, copyConf);
     try (HTable ht = (HTable) conn.getTable(DUMMY_TABLE)) {
       ht.multiAp = ap;
@@ -1370,7 +1371,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testErrors() throws IOException {
-    ClusterConnection conn = new MyConnectionImpl(CONF);
+    ConnectionImplementation conn = new MyConnectionImpl(CONF);
     AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, CONF, new IOException("test"));
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
@@ -1394,7 +1395,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testCallQueueTooLarge() throws IOException {
-    ClusterConnection conn = new MyConnectionImpl(CONF);
+    ConnectionImplementation conn = new MyConnectionImpl(CONF);
     AsyncProcessWithFailure ap =
         new AsyncProcessWithFailure(conn, CONF, new CallQueueTooBigException());
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
@@ -1609,7 +1610,7 @@ public class TestAsyncProcess {
     // TODO: this is kind of timing dependent... perhaps it should detect from createCaller
     //       that the replica call has happened and that way control the ordering.
     Configuration conf = new Configuration();
-    ClusterConnection conn = createHConnectionWithReplicas();
+    ConnectionImplementation conn = createConnectionImplWithReplicas();
     conf.setInt(AsyncProcess.PRIMARY_CALL_TIMEOUT_KEY, replicaAfterMs * 1000);
     if (retries >= 0) {
       conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
@@ -1707,16 +1708,15 @@ public class TestAsyncProcess {
   }
 
   static class AsyncProcessForThrowableCheck extends AsyncProcess {
-    public AsyncProcessForThrowableCheck(ClusterConnection hc, Configuration conf) {
-      super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(
-          conf));
+    public AsyncProcessForThrowableCheck(ConnectionImplementation hc, Configuration conf) {
+      super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
     }
   }
 
   @Test
   public void testUncheckedException() throws Exception {
     // Test the case pool.submit throws unchecked exception
-    ClusterConnection hc = createHConnection();
+    ConnectionImplementation hc = createConnectionImpl();
     MyThreadPoolExecutor myPool =
         new MyThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
             new LinkedBlockingQueue<>(200));
@@ -1748,7 +1748,7 @@ public class TestAsyncProcess {
     final int retries = 1;
     myConf.setLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, specialPause);
     myConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-    ClusterConnection conn = new MyConnectionImpl(myConf);
+    ConnectionImplementation conn = new MyConnectionImpl(myConf);
     AsyncProcessWithFailure ap =
         new AsyncProcessWithFailure(conn, myConf, new CallQueueTooBigException());
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
@@ -1807,7 +1807,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testRetryWithExceptionClearsMetaCache() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     Configuration myConf = conn.getConfiguration();
     myConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);
 
@@ -1840,7 +1840,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testQueueRowAccess() throws Exception {
-    ClusterConnection conn = createHConnection();
+    ConnectionImplementation conn = createConnectionImpl();
     BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, null, null,
       new BufferedMutatorParams(DUMMY_TABLE).writeBufferSize(100000));
     Put p0 = new Put(DUMMY_BYTES_1).addColumn(DUMMY_BYTES_1, DUMMY_BYTES_1, DUMMY_BYTES_1);
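
The test mocks ConnectionImplementation directly now that the locate methods live there. A condensed sketch of the stubbing pattern used by createConnectionImpl() and setMockLocation() above, reusing the test's own names (CONF, DUMMY_TABLE, DUMMY_BYTES_1, loc1):

  ConnectionImplementation conn = Mockito.mock(ConnectionImplementation.class);
  Mockito.when(conn.getConfiguration()).thenReturn(CONF);
  // Stub the package-private locateRegion(...) so AsyncProcess resolves rows without RPCs.
  Mockito.when(conn.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(DUMMY_BYTES_1),
      Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt()))
      .thenReturn(new RegionLocations(loc1));
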
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java
index ffc4e51..2c24aaa 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java
@@ -175,8 +175,8 @@ public class TestAsyncProcessWithRegionException {
     }
   }
 
-  private static ClusterConnection createHConnection() throws IOException {
-    ClusterConnection hc = Mockito.mock(ClusterConnection.class);
+  private static ConnectionImplementation createHConnection() throws IOException {
+    ConnectionImplementation hc = Mockito.mock(ConnectionImplementation.class);
     NonceGenerator ng = Mockito.mock(NonceGenerator.class);
     Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE);
     Mockito.when(hc.getNonceGenerator()).thenReturn(ng);
@@ -190,8 +190,8 @@ public class TestAsyncProcessWithRegionException {
     return hc;
   }
 
-  private static void setMockLocation(ClusterConnection hc, byte[] row, RegionLocations result)
-    throws IOException {
+  private static void setMockLocation(ConnectionImplementation hc, byte[] row,
+      RegionLocations result) throws IOException {
     Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(),
       Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result);
     Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(),
@@ -201,7 +201,7 @@ public class TestAsyncProcessWithRegionException {
   private static class MyAsyncProcess extends AsyncProcess {
     private final ExecutorService service = Executors.newFixedThreadPool(5);
 
-    MyAsyncProcess(ClusterConnection hc, Configuration conf) {
+    MyAsyncProcess(ConnectionImplementation hc, Configuration conf) {
       super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf));
     }
 
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
index f8e1295..f0375e2 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
@@ -48,7 +48,7 @@ public class TestBufferedMutator {
    * Just to prove that I can insert a BM other than default.
    */
   public static class MyBufferedMutator extends BufferedMutatorImpl {
-    MyBufferedMutator(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory,
+    MyBufferedMutator(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory,
         RpcControllerFactory rpcFactory, BufferedMutatorParams params) {
       super(conn, rpcCallerFactory, rpcFactory, params);
     }
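
The MyBufferedMutator class above is test plumbing around BufferedMutatorImpl; ordinary client code obtains a mutator from the Connection instead. A minimal sketch of that usage, not part of the patch (the table name, family, qualifier and value are placeholders):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedMutatorSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("my_table"))) {
          Put p = new Put(Bytes.toBytes("row-1"))
              .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          // Mutations are buffered client-side and flushed when the buffer fills or on close().
          mutator.mutate(p);
        }
      }
    }
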
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
index 48ca751..9f1f6f3 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
@@ -71,7 +71,7 @@ public class TestClientScanner {
   ExecutorService pool;
   Configuration conf;
 
-  ClusterConnection clusterConn;
+  ConnectionImplementation clusterConn;
   RpcRetryingCallerFactory rpcFactory;
   RpcControllerFactory controllerFactory;
 
@@ -80,7 +80,7 @@ public class TestClientScanner {
 
   @Before
   public void setup() throws IOException {
-    clusterConn = Mockito.mock(ClusterConnection.class);
+    clusterConn = Mockito.mock(ConnectionImplementation.class);
     rpcFactory = Mockito.mock(RpcRetryingCallerFactory.class);
     controllerFactory = Mockito.mock(RpcControllerFactory.class);
     pool = Executors.newSingleThreadExecutor();
@@ -103,11 +103,11 @@ public class TestClientScanner {
     private boolean initialized = false;
 
     public MockClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
-        ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
+        ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory,
         RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout)
         throws IOException {
       super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool,
-          primaryOperationTimeout);
+        primaryOperationTimeout);
     }
 
     @Override
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
index 4432192..2896a2f 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
@@ -45,7 +45,7 @@ public class TestReversedScannerCallable {
       HBaseClassTestRule.forClass(TestReversedScannerCallable.class);
 
   @Mock
-  private ClusterConnection connection;
+  private ConnectionImplementation connection;
   @Mock
   private Scan scan;
   @Mock
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
index b5e99d2..b9cb167 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
@@ -31,13 +31,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.IntegrationTestIngest;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
@@ -52,6 +51,7 @@ import org.junit.Assert;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
@@ -350,10 +350,11 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
           numReadFailures.addAndGet(1); // fail the test
           for (Result r : results) {
             LOG.error("FAILED FOR " + r);
-            RegionLocations rl = ((ClusterConnection)connection).
-                locateRegion(tableName, r.getRow(), true, true);
-            HRegionLocation locations[] = rl.getRegionLocations();
-            for (HRegionLocation h : locations) {
+            List<HRegionLocation> locs;
+            try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+              locs = locator.getRegionLocations(r.getRow());
+            }
+            for (HRegionLocation h : locs) {
               LOG.error("LOCATION " + h);
             }
           }
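
The hunk above swaps the internal ClusterConnection.locateRegion call for the public RegionLocator API. In isolation the lookup pattern looks roughly like the following minimal sketch, which is not part of the patch (the table name and row key are placeholders):

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateReplicasSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            RegionLocator locator = conn.getRegionLocator(TableName.valueOf("my_table"))) {
          // One HRegionLocation per replica of the region containing the row.
          List<HRegionLocation> locs = locator.getRegionLocations(Bytes.toBytes("some-row"));
          for (HRegionLocation h : locs) {
            System.out.println("LOCATION " + h);
          }
        }
      }
    }
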
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 1a00945..ac2d8e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -99,6 +99,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -1948,35 +1949,34 @@ public class HBaseFsck extends Configured implements Closeable {
    * Record the location of the hbase:meta region as found in ZooKeeper.
    */
   private boolean recordMetaRegion() throws IOException {
-    RegionLocations rl = connection.locateRegion(TableName.META_TABLE_NAME,
-        HConstants.EMPTY_START_ROW, false, false);
-    if (rl == null) {
-      errors.reportError(ERROR_CODE.NULL_META_REGION,
-          "META region was not found in ZooKeeper");
+    List<HRegionLocation> locs;
+    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
+      locs = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true);
+    }
+    if (locs == null || locs.isEmpty()) {
+      errors.reportError(ERROR_CODE.NULL_META_REGION, "META region was not found in ZooKeeper");
       return false;
     }
-    for (HRegionLocation metaLocation : rl.getRegionLocations()) {
+    for (HRegionLocation metaLocation : locs) {
       // Check if Meta region is valid and existing
-      if (metaLocation == null ) {
-        errors.reportError(ERROR_CODE.NULL_META_REGION,
-            "META region location is null");
+      if (metaLocation == null) {
+        errors.reportError(ERROR_CODE.NULL_META_REGION, "META region location is null");
         return false;
       }
-      if (metaLocation.getRegionInfo() == null) {
-        errors.reportError(ERROR_CODE.NULL_META_REGION,
-            "META location regionInfo is null");
+      if (metaLocation.getRegion() == null) {
+        errors.reportError(ERROR_CODE.NULL_META_REGION, "META location regionInfo is null");
         return false;
       }
       if (metaLocation.getHostname() == null) {
-        errors.reportError(ERROR_CODE.NULL_META_REGION,
-            "META location hostName is null");
+        errors.reportError(ERROR_CODE.NULL_META_REGION, "META location hostName is null");
         return false;
       }
       ServerName sn = metaLocation.getServerName();
-      MetaEntry m = new MetaEntry(metaLocation.getRegionInfo(), sn, EnvironmentEdgeManager.currentTime());
-      HbckInfo hbckInfo = regionInfoMap.get(metaLocation.getRegionInfo().getEncodedName());
+      MetaEntry m =
+        new MetaEntry(metaLocation.getRegion(), sn, EnvironmentEdgeManager.currentTime());
+      HbckInfo hbckInfo = regionInfoMap.get(metaLocation.getRegion().getEncodedName());
       if (hbckInfo == null) {
-        regionInfoMap.put(metaLocation.getRegionInfo().getEncodedName(), new HbckInfo(m));
+        regionInfoMap.put(metaLocation.getRegion().getEncodedName(), new HbckInfo(m));
       } else {
         hbckInfo.metaEntry = m;
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 0673c2f..a779d36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -44,22 +44,21 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
@@ -707,7 +706,7 @@ public class RegionSplitter {
     Path tableDir = tableDirAndSplitFile.getFirst();
     FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
     // Clear the cache to forcibly refresh region information
-    ((ClusterConnection)connection).clearRegionLocationCache();
+    connection.clearRegionLocationCache();
     TableDescriptor htd = null;
     try (Table table = connection.getTable(tableName)) {
       htd = table.getDescriptor();
@@ -768,7 +767,7 @@ public class RegionSplitter {
         } catch (NoServerForRegionException nsfre) {
           LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
           physicalSplitting.add(region);
-          ((ClusterConnection)connection).clearRegionLocationCache();
+          connection.clearRegionLocationCache();
         }
       }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 7276eff..2a5a395 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
@@ -39,25 +38,25 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 
 /**
- * {@link ClusterConnection} testing utility.
+ * {@link ConnectionImplementation} testing utility.
  */
 public class HConnectionTestingUtility {
   /*
-   * Not part of {@link HBaseTestingUtility} because this class is not
-   * in same package as {@link ClusterConnection}.  Would have to reveal ugly
-   * {@link ConnectionImplementation} innards to HBaseTestingUtility to give it access.
+   * Not part of {@link HBaseTestingUtility} because this class is not in same package as {@link
+   * ConnectionImplementation}. Would have to reveal ugly {@link ConnectionImplementation} innards
+   * to HBaseTestingUtility to give it access.
    */
   /**
-   * Get a Mocked {@link ClusterConnection} that goes with the passed <code>conf</code>
-   * configuration instance.  Minimally the mock will return
-   * &lt;code>conf&lt;/conf> when {@link ClusterConnection#getConfiguration()} is invoked.
-   * Be sure to shutdown the connection when done by calling
-   * {@link Connection#close()} else it will stick around; this is probably not what you want.
+   * Get a Mocked {@link ConnectionImplementation} that goes with the passed <code>conf</code>
+   * configuration instance. Minimally the mock will return &lt;code>conf&lt;/conf> when
+   * {@link ConnectionImplementation#getConfiguration()} is invoked. Be sure to shutdown the
+   * connection when done by calling {@link Connection#close()} else it will stick around; this is
+   * probably not what you want.
    * @param conf configuration
-   * @return ClusterConnection object for <code>conf</code>
+   * @return ConnectionImplementation object for <code>conf</code>
    * @throws ZooKeeperConnectionException
    */
-  public static ClusterConnection getMockedConnection(final Configuration conf)
+  public static ConnectionImplementation getMockedConnection(final Configuration conf)
   throws ZooKeeperConnectionException {
     ConnectionImplementation connection = Mockito.mock(ConnectionImplementation.class);
     Mockito.when(connection.getConfiguration()).thenReturn(conf);
@@ -70,37 +69,30 @@ public class HConnectionTestingUtility {
   }
 
   /**
-   * Calls {@link #getMockedConnection(Configuration)} and then mocks a few
-   * more of the popular {@link ClusterConnection} methods so they do 'normal'
-   * operation (see return doc below for list). Be sure to shutdown the
-   * connection when done by calling {@link Connection#close()} else it will stick around;
-   * this is probably not what you want.
-   *
+   * Calls {@link #getMockedConnection(Configuration)} and then mocks a few more of the popular
+   * {@link ConnectionImplementation} methods so they do 'normal' operation (see return doc below
+   * for list). Be sure to shutdown the connection when done by calling {@link Connection#close()}
+   * else it will stick around; this is probably not what you want.
    * @param conf Configuration to use
-   * @param admin An AdminProtocol; can be null but is usually
-   * itself a mock.
-   * @param client A ClientProtocol; can be null but is usually
-   * itself a mock.
-   * @param sn ServerName to include in the region location returned by this
-   * <code>connection</code>
-   * @param hri RegionInfo to include in the location returned when
-   * getRegionLocator is called on the mocked connection
+   * @param admin An AdminProtocol; can be null but is usually itself a mock.
+   * @param client A ClientProtocol; can be null but is usually itself a mock.
+   * @param sn ServerName to include in the region location returned by this <code>connection</code>
+   * @param hri RegionInfo to include in the location returned when getRegionLocator is called on
+   *          the mocked connection
    * @return Mock up a connection that returns a {@link Configuration} when
-   * {@link ClusterConnection#getConfiguration()} is called, a 'location' when
-   * {@link ClusterConnection#getRegionLocation(org.apache.hadoop.hbase.TableName, byte[], boolean)}
-   * is called,
-   * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
-   * {@link ClusterConnection#getAdmin(ServerName)} is called, returns the passed
-   * {@link ClientProtos.ClientService.BlockingInterface} instance when
-   * {@link ClusterConnection#getClient(ServerName)} is called (Be sure to call
-   * {@link Connection#close()} when done with this mocked Connection.
-   * @throws IOException
+   *         {@link ConnectionImplementation#getConfiguration()} is called, a 'location' when
+   *         {@link ConnectionImplementation#getRegionLocation(TableName,byte[], boolean)}
+   *         is called, and that returns the passed
+   *         {@link AdminProtos.AdminService.BlockingInterface} instance when
+   *         {@link ConnectionImplementation#getAdmin(ServerName)} is called, returns the passed
+   *         {@link ClientProtos.ClientService.BlockingInterface} instance when
+   *         {@link ConnectionImplementation#getClient(ServerName)} is called (Be sure to call
+   *         {@link Connection#close()} when done with this mocked Connection.
    */
-  public static ClusterConnection getMockedConnectionAndDecorate(final Configuration conf,
+  public static ConnectionImplementation getMockedConnectionAndDecorate(final Configuration conf,
       final AdminProtos.AdminService.BlockingInterface admin,
-      final ClientProtos.ClientService.BlockingInterface client,
-      final ServerName sn, final RegionInfo hri)
-  throws IOException {
+      final ClientProtos.ClientService.BlockingInterface client, final ServerName sn,
+      final RegionInfo hri) throws IOException {
     ConnectionImplementation c = Mockito.mock(ConnectionImplementation.class);
     Mockito.when(c.getConfiguration()).thenReturn(conf);
     Mockito.doNothing().when(c).close();
@@ -141,18 +133,17 @@ public class HConnectionTestingUtility {
   }
 
   /**
-   * Get a Mockito spied-upon {@link ClusterConnection} that goes with the passed
-   * <code>conf</code> configuration instance.
-   * Be sure to shutdown the connection when done by calling
-   * {@link Connection#close()} else it will stick around; this is probably not what you want.
+   * Get a Mockito spied-upon {@link ConnectionImplementation} that goes with the passed
+   * <code>conf</code> configuration instance. Be sure to shutdown the connection when done by
+   * calling {@link Connection#close()} else it will stick around; this is probably not what you
+   * want.
    * @param conf configuration
-   * @return ClusterConnection object for <code>conf</code>
-   * @throws ZooKeeperConnectionException
-   * [Dead link]: See also
-   * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
+   * @return ConnectionImplementation object for <code>conf</code>
+   * @throws ZooKeeperConnectionException [Dead link]: See also
+   *           {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
    */
-  public static ClusterConnection getSpiedConnection(final Configuration conf)
-  throws IOException {
+  public static ConnectionImplementation getSpiedConnection(final Configuration conf)
+      throws IOException {
     ConnectionImplementation connection =
       Mockito.spy(new ConnectionImplementation(conf, null, null));
     return connection;
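
A rough sketch of how a test might consume the mocked connection returned above; it is kept in the org.apache.hadoop.hbase.client test package, where both the utility and ConnectionImplementation are visible, and only getConfiguration() is assumed to be stubbed:

    package org.apache.hadoop.hbase.client;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MockedConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        ConnectionImplementation conn = HConnectionTestingUtility.getMockedConnection(conf);
        try {
          // The mock minimally answers getConfiguration(); anything else needs explicit stubbing.
          System.out.println(conn.getConfiguration() == conf);
        } finally {
          conn.close(); // close when done, as the javadoc above insists
        }
      }
    }
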
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
index 4e5665d..cd27a30 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
@@ -111,8 +111,8 @@ public class TestCISleep extends AbstractTestCITimeout {
     }
 
     RegionAdminServiceCallable<Object> regionAdminServiceCallable =
-      new RegionAdminServiceCallable<Object>((ClusterConnection) TEST_UTIL.getConnection(),
-          new RpcControllerFactory(TEST_UTIL.getConfiguration()), tableName, FAM_NAM) {
+      new RegionAdminServiceCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
+        new RpcControllerFactory(TEST_UTIL.getConfiguration()), tableName, FAM_NAM) {
         @Override
         public Object call(HBaseRpcController controller) throws Exception {
           return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
index 38f410d..c00f4b6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
@@ -95,8 +95,9 @@ public class TestHBaseAdminNoCluster {
     configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, count);
     // Get mocked connection.   Getting the connection will register it so when HBaseAdmin is
     // constructed with same configuration, it will find this mocked connection.
-    ClusterConnection connection = HConnectionTestingUtility.getMockedConnection(configuration);
-    // Mock so we get back the master interface.  Make it so when createTable is called, we throw
+    ConnectionImplementation connection =
+      HConnectionTestingUtility.getMockedConnection(configuration);
+    // Mock so we get back the master interface. Make it so when createTable is called, we throw
     // the PleaseHoldException.
     MasterKeepAliveConnection masterAdmin = Mockito.mock(MasterKeepAliveConnection.class);
     Mockito.when(masterAdmin.createTable((RpcController)Mockito.any(),
@@ -292,7 +293,7 @@ public class TestHBaseAdminNoCluster {
     final int count = 10;
     configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, count);
 
-    ClusterConnection connection = mock(ClusterConnection.class);
+    ConnectionImplementation connection = mock(ConnectionImplementation.class);
     when(connection.getConfiguration()).thenReturn(configuration);
     MasterKeepAliveConnection masterAdmin =
         Mockito.mock(MasterKeepAliveConnection.class, new Answer() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
similarity index 86%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
index 5d36ea9..53f5064 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase;
+package org.apache.hadoop.hbase.client;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -26,11 +26,19 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableMap;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -112,8 +120,8 @@ public class TestMetaTableAccessorNoCluster {
     assertTrue(hri == null);
     // OK, give it what it expects
     kvs.clear();
-    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
-      HConstants.REGIONINFO_QUALIFIER, RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
+    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER,
+      RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
     hri = MetaTableAccessor.getRegionInfo(Result.create(kvs));
     assertNotNull(hri);
     assertTrue(RegionInfo.COMPARATOR.compare(hri, RegionInfoBuilder.FIRST_META_REGIONINFO) == 0);
@@ -123,8 +131,6 @@ public class TestMetaTableAccessorNoCluster {
    * Test that MetaTableAccessor will ride over server throwing
    * "Server not running" IOEs.
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a>
-   * @throws IOException
-   * @throws InterruptedException
    */
   @Test
   public void testRideOverServerNotRunning()
@@ -135,7 +141,7 @@ public class TestMetaTableAccessorNoCluster {
     // This is a servername we use in a few places below.
     ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
 
-    ClusterConnection connection = null;
+    ConnectionImplementation connection = null;
     try {
       // Mock an ClientProtocol. Our mock implementation will fail a few
       // times when we go to open a scanner.
@@ -190,26 +196,27 @@ public class TestMetaTableAccessorNoCluster {
       // Return the RegionLocations object when locateRegion
       // The ugly format below comes of 'Important gotcha on spying real objects!' from
       // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html
-      Mockito.doReturn(rl).when
-      (connection).locateRegion((TableName)Mockito.any(), (byte[])Mockito.any(),
-              Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt());
+      Mockito.doReturn(rl).when(connection).locateRegion((TableName) Mockito.any(),
+        (byte[]) Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt());
 
       // Now shove our HRI implementation into the spied-upon connection.
-      Mockito.doReturn(implementation).
-        when(connection).getClient(Mockito.any());
+      Mockito.doReturn(implementation).when(connection).getClient(Mockito.any());
 
       // Scan meta for user tables and verify we got back expected answer.
       NavigableMap<RegionInfo, Result> hris =
         MetaTableAccessor.getServerUserRegions(connection, sn);
       assertEquals(1, hris.size());
-      assertTrue(RegionInfo.COMPARATOR.compare(hris.firstEntry().getKey(), RegionInfoBuilder.FIRST_META_REGIONINFO) == 0);
+      assertTrue(RegionInfo.COMPARATOR.compare(hris.firstEntry().getKey(),
+        RegionInfoBuilder.FIRST_META_REGIONINFO) == 0);
       assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
       // Finally verify that scan was called four times -- three times
       // with exception and then on 4th attempt we succeed
-      Mockito.verify(implementation, Mockito.times(4)).
-        scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any());
+      Mockito.verify(implementation, Mockito.times(4)).scan((RpcController) Mockito.any(),
+        (ScanRequest) Mockito.any());
     } finally {
-      if (connection != null && !connection.isClosed()) connection.close();
+      if (connection != null && !connection.isClosed()) {
+        connection.close();
+      }
       zkw.close();
     }
   }
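
The hunk keeps the "Important gotcha on spying real objects" note from the Mockito docs. The gotcha it refers to, sketched on a plain java.util.List rather than an HBase connection:

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.spy;

    import java.util.ArrayList;
    import java.util.List;

    public class SpyGotchaSketch {
      public static void main(String[] args) {
        List<String> spied = spy(new ArrayList<String>());
        // when(spied.get(0)).thenReturn("x") would call the real get(0) first and throw
        // IndexOutOfBoundsException on the empty list; doReturn(..).when(..) never invokes it.
        doReturn("x").when(spied).get(0);
        System.out.println(spied.get(0)); // prints "x"
      }
    }
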
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index 89430f6..987ac7e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -672,7 +672,7 @@ public class TestReplicaWithCluster {
   public void testGetRegionLocationFromPrimaryMetaRegion() throws IOException, InterruptedException {
     HTU.getAdmin().balancerSwitch(false, true);
 
-    ((ConnectionImplementation) HTU.getAdmin().getConnection()).setUseMetaReplicas(true);
+    ((ConnectionImplementation) HTU.getConnection()).setUseMetaReplicas(true);
 
     // Create table then get the single region for our new table.
     HTableDescriptor hdt = HTU.createTableDescriptor("testGetRegionLocationFromPrimaryMetaRegion");
@@ -684,12 +684,12 @@ public class TestReplicaWithCluster {
       RegionServerHostingPrimayMetaRegionSlowOrStopCopro.slowDownPrimaryMetaScan = true;
 
       // Get user table location, always get it from the primary meta replica
-      RegionLocations url = ((ClusterConnection) HTU.getConnection())
-          .locateRegion(hdt.getTableName(), row, false, false);
-
+      try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) {
+        locator.getRegionLocations(row, true);
+      }
     } finally {
       RegionServerHostingPrimayMetaRegionSlowOrStopCopro.slowDownPrimaryMetaScan = false;
-      ((ConnectionImplementation) HTU.getAdmin().getConnection()).setUseMetaReplicas(false);
+      ((ConnectionImplementation) HTU.getConnection()).setUseMetaReplicas(false);
       HTU.getAdmin().balancerSwitch(true, true);
       HTU.getAdmin().disableTable(hdt.getTableName());
       HTU.deleteTable(hdt.getTableName());
@@ -705,23 +705,25 @@ public class TestReplicaWithCluster {
   public void testReplicaGetWithPrimaryAndMetaDown() throws IOException, InterruptedException {
     HTU.getAdmin().balancerSwitch(false, true);
 
-    ((ConnectionImplementation)HTU.getAdmin().getConnection()).setUseMetaReplicas(true);
+    ((ConnectionImplementation)HTU.getConnection()).setUseMetaReplicas(true);
 
     // Create table then get the single region for our new table.
     HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaGetWithPrimaryAndMetaDown");
     hdt.setRegionReplication(2);
     try {
-
       Table table = HTU.createTable(hdt, new byte[][] { f }, null);
-
       // Get Meta location
-      RegionLocations mrl = ((ClusterConnection) HTU.getConnection())
-          .locateRegion(TableName.META_TABLE_NAME,
-              HConstants.EMPTY_START_ROW, false, false);
+      RegionLocations mrl;
+      try (
+          RegionLocator locator = HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+        mrl = new RegionLocations(locator.getRegionLocations(HConstants.EMPTY_START_ROW, true));
+      }
 
       // Get user table location
-      RegionLocations url = ((ClusterConnection) HTU.getConnection())
-          .locateRegion(hdt.getTableName(), row, false, false);
+      RegionLocations url;
+      try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) {
+        url = new RegionLocations(locator.getRegionLocations(row, true));
+      }
 
       // Make sure that user primary region is co-hosted with the meta region
       if (!url.getDefaultRegionLocation().getServerName().equals(
@@ -740,12 +742,15 @@ public class TestReplicaWithCluster {
 
       // Wait until the meta table is updated with new location info
       while (true) {
-        mrl = ((ClusterConnection) HTU.getConnection())
-            .locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, false, false);
+        try (RegionLocator locator =
+          HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+          mrl = new RegionLocations(locator.getRegionLocations(HConstants.EMPTY_START_ROW, true));
+        }
 
         // Get user table location
-        url = ((ClusterConnection) HTU.getConnection())
-            .locateRegion(hdt.getTableName(), row, false, true);
+        try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) {
+          url = new RegionLocations(locator.getRegionLocations(row, true));
+        }
 
         LOG.info("meta locations " + mrl);
         LOG.info("table locations " + url);
@@ -787,7 +792,7 @@ public class TestReplicaWithCluster {
       Assert.assertTrue(r.isStale());
 
     } finally {
-      ((ConnectionImplementation)HTU.getAdmin().getConnection()).setUseMetaReplicas(false);
+      ((ConnectionImplementation)HTU.getConnection()).setUseMetaReplicas(false);
       RegionServerHostingPrimayMetaRegionSlowOrStopCopro.throwException = false;
       HTU.getAdmin().balancerSwitch(true, true);
       HTU.getAdmin().disableTable(hdt.getTableName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 3703f43..6616b3b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -229,7 +229,7 @@ public class TestReplicasClient {
 
   @Before
   public void before() throws IOException {
-    ((ClusterConnection) HTU.getAdmin().getConnection()).clearRegionLocationCache();
+    HTU.getConnection().clearRegionLocationCache();
     try {
       openRegion(hriPrimary);
     } catch (Exception ignored) {
@@ -250,8 +250,7 @@ public class TestReplicasClient {
       closeRegion(hriPrimary);
     } catch (Exception ignored) {
     }
-
-    ((ClusterConnection) HTU.getAdmin().getConnection()).clearRegionLocationCache();
+    HTU.getConnection().clearRegionLocationCache();
   }
 
   private HRegionServer getRS() {
@@ -329,7 +328,7 @@ public class TestReplicasClient {
   public void testLocations() throws Exception {
     byte[] b1 = Bytes.toBytes("testLocations");
     openRegion(hriSecondary);
-    ClusterConnection hc = (ClusterConnection) HTU.getAdmin().getConnection();
+    ConnectionImplementation hc = (ConnectionImplementation) HTU.getConnection();
 
     try {
       hc.clearRegionLocationCache();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
index f44ef28..b0ec37e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
@@ -204,7 +204,7 @@ public class TestSeparateClientZKCluster {
   public void testMetaMoveDuringClientZkClusterRestart() throws Exception {
     TableName tn = TableName.valueOf(name.getMethodName());
     // create table
-    ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
+    Connection conn = TEST_UTIL.getConnection();
     Admin admin = conn.getAdmin();
     HTable table = (HTable) conn.getTable(tn);
     try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
index 446c3f9..0c2532e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
@@ -25,8 +25,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -58,32 +59,33 @@ public class TestMetaAssignmentWithStopMaster {
 
   @Test
   public void testStopActiveMaster() throws Exception {
-    ClusterConnection conn =
-        (ClusterConnection) ConnectionFactory.createConnection(UTIL.getConfiguration());
-    ServerName oldMetaServer = conn.locateRegions(TableName.META_TABLE_NAME).get(0).getServerName();
-    ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName();
+    try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration());
+        RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
+      ServerName oldMetaServer = locator.getAllRegionLocations().get(0).getServerName();
+      ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName();
 
-    UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test");
-    long startTime = System.currentTimeMillis();
-    while (UTIL.getMiniHBaseCluster().getMaster() == null || UTIL.getMiniHBaseCluster().getMaster()
-        .getServerName().equals(oldMaster)) {
-      LOG.info("Wait the standby master become active");
-      Thread.sleep(3000);
-      if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
-        fail("Wait too long for standby master become active");
+      UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test");
+      long startTime = System.currentTimeMillis();
+      while (UTIL.getMiniHBaseCluster().getMaster() == null ||
+        UTIL.getMiniHBaseCluster().getMaster().getServerName().equals(oldMaster)) {
+        LOG.info("Wait the standby master become active");
+        Thread.sleep(3000);
+        if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
+          fail("Wait too long for standby master become active");
+        }
       }
-    }
-    startTime = System.currentTimeMillis();
-    while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
-      LOG.info("Wait the new active master to be initialized");
-      Thread.sleep(3000);
-      if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
-        fail("Wait too long for the new active master to be initialized");
+      startTime = System.currentTimeMillis();
+      while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
+        LOG.info("Wait the new active master to be initialized");
+        Thread.sleep(3000);
+        if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
+          fail("Wait too long for the new active master to be initialized");
+        }
       }
-    }
 
-    ServerName newMetaServer = conn.locateRegions(TableName.META_TABLE_NAME).get(0).getServerName();
-    assertTrue("The new meta server " + newMetaServer + " should be same with" +
+      ServerName newMetaServer = locator.getAllRegionLocations().get(0).getServerName();
+      assertTrue("The new meta server " + newMetaServer + " should be same with" +
         " the old meta server " + oldMetaServer, newMetaServer.equals(oldMetaServer));
+    }
   }
 }
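
The reworked test resolves the meta location through a RegionLocator opened in try-with-resources. The core lookup, sketched standalone against a default Configuration (mini-cluster setup omitted):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // The first entry gives the server currently hosting the hbase:meta region.
          ServerName metaServer = locator.getAllRegionLocations().get(0).getServerName();
          System.out.println("hbase:meta hosted on " + metaServer);
        }
      }
    }
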
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
index a4b99a1..fcc1bb8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -366,24 +366,15 @@ public class TestLoadIncrementalHFilesSplitRecovery {
 
   private ClusterConnection getMockedConnection(final Configuration conf)
       throws IOException, org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
-    ClusterConnection c = Mockito.mock(ClusterConnection.class);
-    Mockito.when(c.getConfiguration()).thenReturn(conf);
-    Mockito.doNothing().when(c).close();
-    // Make it so we return a particular location when asked.
-    final HRegionLocation loc = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO,
-        ServerName.valueOf("example.org", 1234, 0));
-    Mockito.when(
-      c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean()))
-        .thenReturn(loc);
-    Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())).thenReturn(loc);
-    ClientProtos.ClientService.BlockingInterface hri =
-        Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+    ServerName sn = ServerName.valueOf("example.org", 1234, 0);
+    RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
+    ClientProtos.ClientService.BlockingInterface client =
+      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
     Mockito
-        .when(
-          hri.bulkLoadHFile((RpcController) Mockito.any(), (BulkLoadHFileRequest) Mockito.any()))
-        .thenThrow(new ServiceException(new IOException("injecting bulk load error")));
-    Mockito.when(c.getClient(Mockito.any())).thenReturn(hri);
-    return c;
+      .when(
+        client.bulkLoadHFile((RpcController) Mockito.any(), (BulkLoadHFileRequest) Mockito.any()))
+      .thenThrow(new ServiceException(new IOException("injecting bulk load error")));
+    return HConnectionTestingUtility.getMockedConnectionAndDecorate(conf, null, client, sn, hri);
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index 40ce1c5..d25ccef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -307,8 +307,7 @@ public class BaseTestHBaseFsck {
       tbl.close();
       tbl = null;
     }
-
-    ((ClusterConnection) connection).clearRegionLocationCache();
+    connection.clearRegionLocationCache();
     deleteTable(TEST_UTIL, tablename);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 410dd0c..0a66ec0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -19,33 +19,34 @@ package org.apache.hadoop.hbase.util;
 import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.INCREMENT;
 import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.MUTATE_INFO;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
+
 /**
  * Common base class for reader and writer parts of multi-thread HBase load
  * test (See LoadTestTool).
@@ -491,7 +492,6 @@ public abstract class MultiThreadedAction {
   }
 
   private void printLocations(Result r) {
-    RegionLocations rl = null;
     if (r == null) {
       LOG.info("FAILED FOR null Result");
       return;
@@ -500,15 +500,14 @@ public abstract class MultiThreadedAction {
     if (r.getRow() == null) {
       return;
     }
-    try {
-      rl = ((ClusterConnection)connection).locateRegion(tableName, r.getRow(), true, true);
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      List<HRegionLocation> locs = locator.getRegionLocations(r.getRow());
+      for (HRegionLocation h : locs) {
+        LOG.info("LOCATION " + h);
+      }
     } catch (IOException e) {
       LOG.warn("Couldn't get locations for row " + Bytes.toString(r.getRow()));
     }
-    HRegionLocation locations[] = rl.getRegionLocations();
-    for (HRegionLocation h : locations) {
-      LOG.info("LOCATION " + h);
-    }
   }
 
   private String resultToString(Result result) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
index 6864366..27f5fb9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
@@ -21,14 +21,13 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Get;
-
 import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
@@ -377,8 +376,10 @@ public class MultiThreadedReader extends MultiThreadedAction
           numKeysVerified.incrementAndGet();
         }
       } else {
-        HRegionLocation hloc = connection.getRegionLocation(tableName,
-          get.getRow(), false);
+        HRegionLocation hloc;
+        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+          hloc = locator.getRegionLocation(get.getRow());
+        }
         String rowKey = Bytes.toString(get.getRow());
         LOG.info("Key = " + rowKey + ", Region location: " + hloc);
         if(isNullExpected) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
index 54be0d3..1ebc9b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
@@ -27,10 +27,10 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
 import org.slf4j.Logger;
@@ -97,9 +97,9 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction {
 
   protected String getRegionDebugInfoSafe(Table table, byte[] rowKey) {
     HRegionLocation cached = null, real = null;
-    try {
-      cached = connection.getRegionLocation(tableName, rowKey, false);
-      real = connection.getRegionLocation(tableName, rowKey, true);
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      cached = locator.getRegionLocation(rowKey, false);
+      real = locator.getRegionLocation(rowKey, true);
     } catch (Throwable t) {
       // Cannot obtain region information for another catch block - too bad!
     }
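
getRegionDebugInfoSafe now asks a RegionLocator for both the cached and the freshly reloaded location. A hedged sketch of the same idea as a standalone helper (names are illustrative, not from the patch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionDebugSketch {
      // 'conn' is any open Connection; the table and row come from the caller.
      static String describe(Connection conn, TableName table, byte[] row) throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          HRegionLocation cached = locator.getRegionLocation(row, false); // may be stale
          HRegionLocation fresh = locator.getRegionLocation(row, true);   // forces a meta lookup
          return "cached location: " + cached + "; refreshed location: " + fresh;
        }
      }
    }
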


[hbase] 16/27: HBASE-22238 Fix TestRpcControllerFactory

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 0d1ec0c1c56ab06ebdff02c94339468e228c6048
Author: zhangduo <zh...@apache.org>
AuthorDate: Sun Apr 14 21:30:34 2019 +0800

    HBASE-22238 Fix TestRpcControllerFactory
---
 .../hbase/client/TestRpcControllerFactory.java     | 171 ++++++++++-----------
 1 file changed, 77 insertions(+), 94 deletions(-)

diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java
index 2d60733..bdda4e8 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ProtobufCoprocessorService;
 import org.apache.hadoop.hbase.ipc.DelegatingHBaseRpcController;
@@ -52,12 +53,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ConcurrentHashMulti
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Multiset;
 
-@Category({MediumTests.class, ClientTests.class})
+@Category({ MediumTests.class, ClientTests.class })
 public class TestRpcControllerFactory {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestRpcControllerFactory.class);
+    HBaseClassTestRule.forClass(TestRpcControllerFactory.class);
 
   public static class StaticRpcControllerFactory extends RpcControllerFactory {
 
@@ -85,7 +86,6 @@ public class TestRpcControllerFactory {
 
     private static Multiset<Integer> GROUPED_PRIORITY = ConcurrentHashMultiset.create();
     private static AtomicInteger INT_PRIORITY = new AtomicInteger();
-    private static AtomicInteger TABLE_PRIORITY = new AtomicInteger();
 
     public CountingRpcController(HBaseRpcController delegate) {
       super(delegate);
@@ -93,24 +93,8 @@ public class TestRpcControllerFactory {
 
     @Override
     public void setPriority(int priority) {
-      int oldPriority = getPriority();
-      super.setPriority(priority);
-      int newPriority = getPriority();
-      if (newPriority != oldPriority) {
-        INT_PRIORITY.incrementAndGet();
-        GROUPED_PRIORITY.add(priority);
-      }
-    }
-
-    @Override
-    public void setPriority(TableName tn) {
-      super.setPriority(tn);
-      // ignore counts for system tables - it could change and we really only want to check on what
-      // the client should change
-      if (tn != null && !tn.isSystemTable()) {
-        TABLE_PRIORITY.incrementAndGet();
-      }
-
+      INT_PRIORITY.incrementAndGet();
+      GROUPED_PRIORITY.add(priority);
     }
   }
 
@@ -120,7 +104,7 @@ public class TestRpcControllerFactory {
   public TestName name = new TestName();
 
   @BeforeClass
-  public static void setup() throws Exception {
+  public static void setUp() throws Exception {
     // load an endpoint so we have an endpoint to test - it doesn't matter which one, but
     // this is already in tests, so we can just use it.
     Configuration conf = UTIL.getConfiguration();
@@ -131,7 +115,7 @@ public class TestRpcControllerFactory {
   }
 
   @AfterClass
-  public static void teardown() throws Exception {
+  public static void tearDown() throws Exception {
     UTIL.shutdownMiniCluster();
   }
 
@@ -154,84 +138,83 @@ public class TestRpcControllerFactory {
     // change one of the connection properties so we get a new Connection with our configuration
     conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT + 1);
 
-    Connection connection = ConnectionFactory.createConnection(conf);
-    Table table = connection.getTable(tableName);
-    byte[] row = Bytes.toBytes("row");
-    Put p = new Put(row);
-    p.addColumn(fam1, fam1, Bytes.toBytes("val0"));
-    table.put(p);
-
-    Integer counter = 1;
-    counter = verifyCount(counter);
-
-    Delete d = new Delete(row);
-    d.addColumn(fam1, fam1);
-    table.delete(d);
-    counter = verifyCount(counter);
-
-    Put p2 = new Put(row);
-    p2.addColumn(fam1, Bytes.toBytes("qual"), Bytes.toBytes("val1"));
-    table.batch(Lists.newArrayList(p, p2), null);
-    // this only goes to a single server, so we don't need to change the count here
-    counter = verifyCount(counter);
-
-    Append append = new Append(row);
-    append.addColumn(fam1, fam1, Bytes.toBytes("val2"));
-    table.append(append);
-    counter = verifyCount(counter);
-
-    // and check the major lookup calls as well
-    Get g = new Get(row);
-    table.get(g);
-    counter = verifyCount(counter);
-
-    ResultScanner scan = table.getScanner(fam1);
-    scan.next();
-    scan.close();
-    counter = verifyCount(counter + 1);
-
-    Get g2 = new Get(row);
-    table.get(Lists.newArrayList(g, g2));
-    // same server, so same as above for not changing count
-    counter = verifyCount(counter);
-
-    // make sure all the scanner types are covered
-    Scan scanInfo = new Scan(row);
-    // regular small
-    scanInfo.setSmall(true);
-    counter = doScan(table, scanInfo, counter);
-
-    // reversed, small
-    scanInfo.setReversed(true);
-    counter = doScan(table, scanInfo, counter);
-
-    // reversed, regular
-    scanInfo.setSmall(false);
-    counter = doScan(table, scanInfo, counter + 1);
-
-    // make sure we have no priority count
-    verifyPriorityGroupCount(HConstants.ADMIN_QOS, 0);
-    // lets set a custom priority on a get
-    Get get = new Get(row);
-    get.setPriority(HConstants.ADMIN_QOS);
-    table.get(get);
-    verifyPriorityGroupCount(HConstants.ADMIN_QOS, 1);
-
-    table.close();
-    connection.close();
+    try (Connection connection = ConnectionFactory.createConnection(conf);
+        Table table = connection.getTable(tableName)) {
+      byte[] row = Bytes.toBytes("row");
+      Put p = new Put(row);
+      p.addColumn(fam1, fam1, Bytes.toBytes("val0"));
+      table.put(p);
+
+      Integer counter = 1;
+      counter = verifyCount(counter);
+
+      Delete d = new Delete(row);
+      d.addColumn(fam1, fam1);
+      table.delete(d);
+      counter = verifyCount(counter);
+
+      Put p2 = new Put(row);
+      p2.addColumn(fam1, Bytes.toBytes("qual"), Bytes.toBytes("val1"));
+      table.batch(Lists.newArrayList(p, p2), null);
+      // this only goes to a single server, so we don't need to change the count here
+      counter = verifyCount(counter);
+
+      Append append = new Append(row);
+      append.addColumn(fam1, fam1, Bytes.toBytes("val2"));
+      table.append(append);
+      counter = verifyCount(counter);
+
+      // and check the major lookup calls as well
+      Get g = new Get(row);
+      table.get(g);
+      counter = verifyCount(counter);
+
+      ResultScanner scan = table.getScanner(fam1);
+      scan.next();
+      scan.close();
+      counter = verifyCount(counter + 1);
+
+      Get g2 = new Get(row);
+      table.get(Lists.newArrayList(g, g2));
+      // same server, so same as above for not changing count
+      counter = verifyCount(counter);
+
+      // make sure all the scanner types are covered
+      Scan scanInfo = new Scan().withStartRow(row);
+      // regular small
+      scanInfo.setReadType(ReadType.PREAD);
+      counter = doScan(table, scanInfo, counter);
+
+      // reversed, small
+      scanInfo.setReversed(true);
+      counter = doScan(table, scanInfo, counter);
+
+      // reversed, regular
+      scanInfo.setReadType(ReadType.STREAM);
+      counter = doScan(table, scanInfo, counter + 1);
+
+      // make sure we have no priority count
+      verifyPriorityGroupCount(HConstants.ADMIN_QOS, 0);
+      // lets set a custom priority on a get
+      Get get = new Get(row);
+      get.setPriority(HConstants.ADMIN_QOS);
+      table.get(get);
+      // we will reset the controller for setting the call timeout so it will lead to an extra
+      // setPriority
+      verifyPriorityGroupCount(HConstants.ADMIN_QOS, 2);
+    }
   }
 
   int doScan(Table table, Scan scan, int expectedCount) throws IOException {
-    ResultScanner results = table.getScanner(scan);
-    results.next();
-    results.close();
+    try (ResultScanner results = table.getScanner(scan)) {
+      results.next();
+    }
     return verifyCount(expectedCount);
   }
 
   int verifyCount(Integer counter) {
-    assertTrue(CountingRpcController.TABLE_PRIORITY.get() >= counter);
-    assertEquals(0, CountingRpcController.INT_PRIORITY.get());
-    return CountingRpcController.TABLE_PRIORITY.get() + 1;
+    assertTrue(CountingRpcController.INT_PRIORITY.get() >= counter);
+    return CountingRpcController.GROUPED_PRIORITY.count(HConstants.NORMAL_QOS) + 1;
   }
 
   void verifyPriorityGroupCount(int priorityLevel, int count) {
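
For readers mapping the old scanner API to the new one in the hunk above: new Scan(byte[]) plus setSmall(boolean) is replaced by withStartRow plus an explicit Scan.ReadType (PREAD for the former "small" scan, STREAM for a regular one). The following is an illustrative sketch only, assuming a Table handle is already open; the class name and the start row are placeholders, not part of the patch.

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Scan.ReadType;
    import org.apache.hadoop.hbase.client.Table;

    public class ScanReadTypeSketch {

      // Issues one PREAD ("small") scan and one STREAM scan from the same start row,
      // pulling only the first result from each, mirroring the doScan helper above.
      static void scanBothReadTypes(Table table, byte[] startRow) throws IOException {
        Scan pread = new Scan().withStartRow(startRow).setReadType(ReadType.PREAD);
        try (ResultScanner scanner = table.getScanner(pread)) {
          scanner.next();
        }

        Scan stream = new Scan().withStartRow(startRow).setReadType(ReadType.STREAM);
        try (ResultScanner scanner = table.getScanner(stream)) {
          scanner.next();
        }
      }
    }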


[hbase] 22/27: HBASE-22239 Also catch RemoteException in SyncReplicationTestBase.verifyReplicationRequestRejection

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 417c8d1f0ba4e3be2d55f18eafd58da2e12984b2
Author: zhangduo <zh...@apache.org>
AuthorDate: Thu Apr 25 22:43:51 2019 +0800

    HBASE-22239 Also catch RemoteException in SyncReplicationTestBase.verifyReplicationRequestRejection
---
 .../hadoop/hbase/replication/SyncReplicationTestBase.java  | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
index e0d112d..fd8df32 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -51,6 +53,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
+import org.apache.hadoop.ipc.RemoteException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
@@ -247,6 +250,12 @@ public class SyncReplicationTestBase {
     }
   }
 
+  private void assertRejection(Throwable error) {
+    assertThat(error, instanceOf(DoNotRetryIOException.class));
+    assertTrue(error.getMessage().contains("Reject to apply to sink cluster"));
+    assertTrue(error.getMessage().contains(TABLE_NAME.toString()));
+  }
+
   protected final void verifyReplicationRequestRejection(HBaseTestingUtility utility,
       boolean expectedRejection) throws Exception {
     HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
@@ -264,9 +273,10 @@ public class SyncReplicationTestBase {
         ReplicationProtbufUtil.replicateWALEntry(
           connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null);
         fail("Should throw IOException when sync-replication state is in A or DA");
+      } catch (RemoteException e) {
+        assertRejection(e.unwrapRemoteException());
       } catch (DoNotRetryIOException e) {
-        assertTrue(e.getMessage().contains("Reject to apply to sink cluster"));
-        assertTrue(e.getMessage().contains(TABLE_NAME.toString()));
+        assertRejection(e);
       }
     }
   }
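
The change above is needed because the rejection can reach the test either as a DoNotRetryIOException directly or wrapped in a Hadoop RemoteException, depending on how the RPC failure surfaces. Below is an illustrative sketch of the unwrap-then-inspect pattern; the RpcAction interface and class name are placeholders standing in for the actual replicateWALEntry call, not code from the patch.

    import java.io.IOException;

    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.ipc.RemoteException;

    public class RemoteExceptionUnwrapSketch {

      // Placeholder for any RPC-style call that may fail remotely.
      interface RpcAction {
        void run() throws IOException;
      }

      // Runs the action and treats a DoNotRetryIOException, wrapped or not, as the
      // expected rejection; anything else is rethrown in its unwrapped form.
      static void callExpectingRejection(RpcAction action) throws IOException {
        try {
          action.run();
        } catch (RemoteException e) {
          // RemoteException only carries the server-side class name as a string; unwrap it
          // so instanceof checks against the real exception type work again.
          IOException unwrapped = e.unwrapRemoteException();
          if (unwrapped instanceof DoNotRetryIOException) {
            return; // expected rejection; a test would assert on unwrapped.getMessage() here
          }
          throw unwrapped;
        } catch (DoNotRetryIOException e) {
          return; // same rejection, surfaced without the RemoteException wrapper
        }
      }
    }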


[hbase] 03/27: HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d4e107433ae582d9909e0d18715946fafef41af7
Author: zhangduo <zh...@apache.org>
AuthorDate: Thu Dec 6 21:25:34 2018 +0800

    HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin
---
 .../hbase/client/AsyncClusterConnection.java       |   6 +
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |   5 +
 .../hbase/client/AsyncRegionServerAdmin.java       | 210 +++++++++++++++++++++
 .../org/apache/hadoop/hbase/util/FutureUtils.java  |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |  22 ++-
 .../apache/hadoop/hbase/master/ServerManager.java  |  67 -------
 .../master/procedure/RSProcedureDispatcher.java    |  19 +-
 7 files changed, 244 insertions(+), 87 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index c7dea25..1327fd7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -27,6 +28,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface AsyncClusterConnection extends AsyncConnection {
 
   /**
+   * Get the admin service for the given region server.
+   */
+  AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName);
+
+  /**
    * Get the nonce generator for this connection.
    */
   NonceGenerator getNonceGenerator();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 4a5d0c3..62b9d8b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -379,4 +379,9 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
   Optional<MetricsConnection> getConnectionMetrics() {
     return metrics;
   }
+
+  @Override
+  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
+    return new AsyncRegionServerAdmin(serverName, this);
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
new file mode 100644
index 0000000..9accd89
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
+
+/**
+ * A simple wrapper of the {@link AdminService} for a region server, which returns a
+ * {@link CompletableFuture}. This is easier to use, as if you use the raw protobuf interface, you
+ * need to get the result from the {@link RpcCallback}, and if there is an exception, you need to
+ * get it from the {@link RpcController} passed in.
+ * <p/>
+ * Notice that there is no retry, and this is intentional. We have different retry for different
+ * usage for now, if later we want to unify them, we can move the retry logic into this class.
+ */
+@InterfaceAudience.Private
+public class AsyncRegionServerAdmin {
+
+  private final ServerName server;
+
+  private final AsyncConnectionImpl conn;
+
+  AsyncRegionServerAdmin(ServerName server, AsyncConnectionImpl conn) {
+    this.server = server;
+    this.conn = conn;
+  }
+
+  @FunctionalInterface
+  private interface RpcCall<RESP> {
+    void call(AdminService.Interface stub, HBaseRpcController controller, RpcCallback<RESP> done);
+  }
+
+  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
+    CompletableFuture<RESP> future = new CompletableFuture<>();
+    HBaseRpcController controller = conn.rpcControllerFactory.newController();
+    try {
+      rpcCall.call(conn.getAdminStub(server), controller, new RpcCallback<RESP>() {
+
+        @Override
+        public void run(RESP resp) {
+          if (controller.failed()) {
+            future.completeExceptionally(controller.getFailed());
+          } else {
+            future.complete(resp);
+          }
+        }
+      });
+    } catch (IOException e) {
+      future.completeExceptionally(e);
+    }
+    return future;
+  }
+
+  public CompletableFuture<GetRegionInfoResponse> getRegionInfo(GetRegionInfoRequest request) {
+    return call((stub, controller, done) -> stub.getRegionInfo(controller, request, done));
+  }
+
+  public CompletableFuture<GetStoreFileResponse> getStoreFile(GetStoreFileRequest request) {
+    return call((stub, controller, done) -> stub.getStoreFile(controller, request, done));
+  }
+
+  public CompletableFuture<GetOnlineRegionResponse> getOnlineRegion(
+      GetOnlineRegionRequest request) {
+    return call((stub, controller, done) -> stub.getOnlineRegion(controller, request, done));
+  }
+
+  public CompletableFuture<OpenRegionResponse> openRegion(OpenRegionRequest request) {
+    return call((stub, controller, done) -> stub.openRegion(controller, request, done));
+  }
+
+  public CompletableFuture<WarmupRegionResponse> warmupRegion(WarmupRegionRequest request) {
+    return call((stub, controller, done) -> stub.warmupRegion(controller, request, done));
+  }
+
+  public CompletableFuture<CloseRegionResponse> closeRegion(CloseRegionRequest request) {
+    return call((stub, controller, done) -> stub.closeRegion(controller, request, done));
+  }
+
+  public CompletableFuture<FlushRegionResponse> flushRegion(FlushRegionRequest request) {
+    return call((stub, controller, done) -> stub.flushRegion(controller, request, done));
+  }
+
+  public CompletableFuture<CompactionSwitchResponse> compactionSwitch(
+      CompactionSwitchRequest request) {
+    return call((stub, controller, done) -> stub.compactionSwitch(controller, request, done));
+  }
+
+  public CompletableFuture<CompactRegionResponse> compactRegion(CompactRegionRequest request) {
+    return call((stub, controller, done) -> stub.compactRegion(controller, request, done));
+  }
+
+  public CompletableFuture<ReplicateWALEntryResponse> replicateWALEntry(
+      ReplicateWALEntryRequest request) {
+    return call((stub, controller, done) -> stub.replicateWALEntry(controller, request, done));
+  }
+
+  public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request) {
+    return call((stub, controller, done) -> stub.replay(controller, request, done));
+  }
+
+  public CompletableFuture<RollWALWriterResponse> rollWALWriter(RollWALWriterRequest request) {
+    return call((stub, controller, done) -> stub.rollWALWriter(controller, request, done));
+  }
+
+  public CompletableFuture<GetServerInfoResponse> getServerInfo(GetServerInfoRequest request) {
+    return call((stub, controller, done) -> stub.getServerInfo(controller, request, done));
+  }
+
+  public CompletableFuture<StopServerResponse> stopServer(StopServerRequest request) {
+    return call((stub, controller, done) -> stub.stopServer(controller, request, done));
+  }
+
+  public CompletableFuture<UpdateFavoredNodesResponse> updateFavoredNodes(
+      UpdateFavoredNodesRequest request) {
+    return call((stub, controller, done) -> stub.updateFavoredNodes(controller, request, done));
+  }
+
+  public CompletableFuture<UpdateConfigurationResponse> updateConfiguration(
+      UpdateConfigurationRequest request) {
+    return call((stub, controller, done) -> stub.updateConfiguration(controller, request, done));
+  }
+
+  public CompletableFuture<GetRegionLoadResponse> getRegionLoad(GetRegionLoadRequest request) {
+    return call((stub, controller, done) -> stub.getRegionLoad(controller, request, done));
+  }
+
+  public CompletableFuture<ClearCompactionQueuesResponse> clearCompactionQueues(
+      ClearCompactionQueuesRequest request) {
+    return call((stub, controller, done) -> stub.clearCompactionQueues(controller, request, done));
+  }
+
+  public CompletableFuture<ClearRegionBlockCacheResponse> clearRegionBlockCache(
+      ClearRegionBlockCacheRequest request) {
+    return call((stub, controller, done) -> stub.clearRegionBlockCache(controller, request, done));
+  }
+
+  public CompletableFuture<GetSpaceQuotaSnapshotsResponse> getSpaceQuotaSnapshots(
+      GetSpaceQuotaSnapshotsRequest request) {
+    return call((stub, controller, done) -> stub.getSpaceQuotaSnapshots(controller, request, done));
+  }
+
+  public CompletableFuture<ExecuteProceduresResponse> executeProcedures(
+      ExecuteProceduresRequest request) {
+    return call((stub, controller, done) -> stub.executeProcedures(controller, request, done));
+  }
+}
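
An illustrative sketch of how a caller might use the new wrapper, assuming an AsyncClusterConnection is already available; the class name and printed messages are placeholders, and there is deliberately no retry, matching the class javadoc above.

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.AsyncClusterConnection;
    import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
    import org.apache.hadoop.hbase.util.FutureUtils;

    import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;

    public class AsyncRegionServerAdminSketch {

      // Fires a GetServerInfo RPC at one region server and reports the outcome from the
      // returned CompletableFuture without blocking the calling thread.
      static void pingRegionServer(AsyncClusterConnection conn, ServerName serverName) {
        AsyncRegionServerAdmin admin = conn.getRegionServerAdmin(serverName);
        FutureUtils.addListener(admin.getServerInfo(GetServerInfoRequest.getDefaultInstance()),
          (resp, error) -> {
            if (error != null) {
              System.out.println("getServerInfo failed for " + serverName + ": " + error);
            } else {
              System.out.println("server info: " + resp.getServerInfo());
            }
          });
      }
    }
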
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
index c0c7fbd..83950ec 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
@@ -164,4 +164,4 @@ public final class FutureUtils {
     future.completeExceptionally(e);
     return future;
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 45c2548..10f3632 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -202,6 +202,7 @@ import org.apache.hadoop.hbase.util.BloomFilterUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EncryptionTest;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -234,6 +235,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
@@ -1960,6 +1962,15 @@ public class HMaster extends HRegionServer implements MasterServices {
     });
   }
 
+  private void warmUpRegion(ServerName server, RegionInfo region) {
+    FutureUtils.addListener(asyncClusterConnection.getRegionServerAdmin(server)
+      .warmupRegion(RequestConverter.buildWarmupRegionRequest(region)), (r, e) -> {
+        if (e != null) {
+          LOG.warn("Failed to warm up region {} on server {}", region, server, e);
+        }
+      });
+  }
+
   // Public so can be accessed by tests. Blocks until move is done.
   // Replace with an async implementation from which you can get
   // a success/failure result.
@@ -2030,11 +2041,12 @@ public class HMaster extends HRegionServer implements MasterServices {
       }
 
       TransitRegionStateProcedure proc =
-          this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
-      // Warmup the region on the destination before initiating the move. this call
-      // is synchronous and takes some time. doing it before the source region gets
-      // closed
-      serverManager.sendRegionWarmup(rp.getDestination(), hri);
+        this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
+      // Warmup the region on the destination before initiating the move.
+      // A region server could reject the close request because it either does not
+      // have the specified region or the region is being split.
+      warmUpRegion(rp.getDestination(), hri);
+
       LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
       Future<byte[]> future = ProcedureSyncWait.submitProcedure(this.procedureExecutor, proc);
       try {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 88edb79..eb41144 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -51,12 +50,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -159,25 +155,16 @@ public class ServerManager {
   private final ConcurrentNavigableMap<ServerName, ServerMetrics> onlineServers =
     new ConcurrentSkipListMap<>();
 
-  /**
-   * Map of admin interfaces per registered regionserver; these interfaces we use to control
-   * regionservers out on the cluster
-   */
-  private final Map<ServerName, AdminService.BlockingInterface> rsAdmins = new HashMap<>();
-
   /** List of region servers that should not get any more new regions. */
   private final ArrayList<ServerName> drainingServers = new ArrayList<>();
 
   private final MasterServices master;
-  private final ClusterConnection connection;
 
   private final DeadServer deadservers = new DeadServer();
 
   private final long maxSkew;
   private final long warningSkew;
 
-  private final RpcControllerFactory rpcControllerFactory;
-
   /** Listeners that are called on server events. */
   private List<ServerListener> listeners = new CopyOnWriteArrayList<>();
 
@@ -189,8 +176,6 @@ public class ServerManager {
     Configuration c = master.getConfiguration();
     maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
     warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
-    this.connection = master.getClusterConnection();
-    this.rpcControllerFactory = this.connection == null? null: connection.getRpcControllerFactory();
     persistFlushedSequenceId = c.getBoolean(PERSIST_FLUSHEDSEQUENCEID,
         PERSIST_FLUSHEDSEQUENCEID_DEFAULT);
   }
@@ -438,7 +423,6 @@ public class ServerManager {
   void recordNewServerWithLock(final ServerName serverName, final ServerMetrics sl) {
     LOG.info("Registering regionserver=" + serverName);
     this.onlineServers.put(serverName, sl);
-    this.rsAdmins.remove(serverName);
   }
 
   @VisibleForTesting
@@ -633,7 +617,6 @@ public class ServerManager {
       this.onlineServers.remove(sn);
       onlineServers.notifyAll();
     }
-    this.rsAdmins.remove(sn);
   }
 
   /*
@@ -676,34 +659,6 @@ public class ServerManager {
     return this.drainingServers.add(sn);
   }
 
-  // RPC methods to region servers
-
-  private HBaseRpcController newRpcController() {
-    return rpcControllerFactory == null ? null : rpcControllerFactory.newController();
-  }
-
-  /**
-   * Sends a WARMUP RPC to the specified server to warmup the specified region.
-   * <p>
-   * A region server could reject the close request because it either does not
-   * have the specified region or the region is being split.
-   * @param server server to warmup a region
-   * @param region region to  warmup
-   */
-  public void sendRegionWarmup(ServerName server,
-      RegionInfo region) {
-    if (server == null) return;
-    try {
-      AdminService.BlockingInterface admin = getRsAdmin(server);
-      HBaseRpcController controller = newRpcController();
-      ProtobufUtil.warmupRegion(controller, admin, region);
-    } catch (IOException e) {
-      LOG.error("Received exception in RPC for warmup server:" +
-        server + "region: " + region +
-        "exception: " + e);
-    }
-  }
-
   /**
    * Contacts a region server and waits up to timeout ms
    * to close the region.  This bypasses the active hmaster.
@@ -737,28 +692,6 @@ public class ServerManager {
   }
 
   /**
-   * @param sn
-   * @return Admin interface for the remote regionserver named <code>sn</code>
-   * @throws IOException
-   * @throws RetriesExhaustedException wrapping a ConnectException if failed
-   */
-  public AdminService.BlockingInterface getRsAdmin(final ServerName sn)
-  throws IOException {
-    AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
-    if (admin == null) {
-      LOG.debug("New admin connection to " + sn.toString());
-      if (sn.equals(master.getServerName()) && master instanceof HRegionServer) {
-        // A master is also a region server now, see HBASE-10569 for details
-        admin = ((HRegionServer)master).getRSRpcServices();
-      } else {
-        admin = this.connection.getAdmin(sn);
-      }
-      this.rsAdmins.put(sn, admin);
-    }
-    return admin;
-  }
-
-  /**
    * Calculate min necessary to start. This is not an absolute. It is just
    * a friction that will cause us hang around a bit longer waiting on
    * RegionServers to check-in.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index b469cb8..cb1e12c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -25,6 +25,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.CallQueueTooBigException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
 import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -41,11 +43,9 @@ import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
@@ -219,13 +219,8 @@ public class RSProcedureDispatcher
       this.remoteProcedures = remoteProcedures;
     }
 
-    private AdminService.BlockingInterface getRsAdmin() throws IOException {
-      final AdminService.BlockingInterface admin = master.getServerManager().getRsAdmin(serverName);
-      if (admin == null) {
-        throw new IOException("Attempting to send OPEN RPC to server " + getServerName() +
-          " failed because no RPC connection found to this server");
-      }
-      return admin;
+    private AsyncRegionServerAdmin getRsAdmin() throws IOException {
+      return master.getAsyncClusterConnection().getRegionServerAdmin(serverName);
     }
 
     protected final ServerName getServerName() {
@@ -345,11 +340,7 @@ public class RSProcedureDispatcher
     @VisibleForTesting
     protected ExecuteProceduresResponse sendRequest(final ServerName serverName,
         final ExecuteProceduresRequest request) throws IOException {
-      try {
-        return getRsAdmin().executeProcedures(null, request);
-      } catch (ServiceException se) {
-        throw ProtobufUtil.getRemoteException(se);
-      }
+      return FutureUtils.get(getRsAdmin().executeProcedures(request));
     }
 
     protected final void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {