You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by zh...@apache.org on 2019/06/02 03:40:37 UTC
[hbase] 11/27: HBASE-21585 Remove ClusterConnection
This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch HBASE-21512
in repository https://gitbox.apache.org/repos/asf/hbase.git
commit 306534b402617e7b19a30674a13105972f0a8d45
Author: zhangduo <zh...@apache.org>
AuthorDate: Mon Feb 11 20:32:21 2019 +0800
HBASE-21585 Remove ClusterConnection
---
.../hadoop/hbase/client/BufferedMutatorImpl.java | 5 +-
.../client/CancellableRegionServerCallable.java | 6 +-
.../hadoop/hbase/client/ClientServiceCallable.java | 12 +-
.../hadoop/hbase/client/ClusterConnection.java | 181 ---------------------
.../hadoop/hbase/client/ConnectionFactory.java | 2 +-
.../hbase/client/ConnectionImplementation.java | 140 ++++++++++++----
.../hadoop/hbase/client/ConnectionUtils.java | 12 +-
.../org/apache/hadoop/hbase/client/HBaseAdmin.java | 33 ++--
.../org/apache/hadoop/hbase/client/HBaseHbck.java | 17 +-
.../org/apache/hadoop/hbase/client/HTable.java | 27 ++-
.../java/org/apache/hadoop/hbase/client/Hbck.java | 4 +-
.../apache/hadoop/hbase/client/MasterCallable.java | 7 +-
.../hadoop/hbase/client/MultiServerCallable.java | 12 +-
.../hbase/client/NoncedRegionServerCallable.java | 4 +-
.../hbase/client/RegionCoprocessorRpcChannel.java | 4 +-
.../hadoop/hbase/client/RegionServerCallable.java | 12 +-
.../hadoop/hbase/client/ReversedClientScanner.java | 7 -
.../hbase/client/ReversedScannerCallable.java | 8 +-
.../hadoop/hbase/client/ScannerCallable.java | 11 +-
.../hbase/client/ScannerCallableWithReplicas.java | 2 +-
.../hadoop/hbase/client/TestAsyncProcess.java | 7 +-
.../hadoop/hbase/client/TestBufferedMutator.java | 3 +-
.../hadoop/hbase/DistributedHBaseCluster.java | 27 +--
.../mapreduce/TestMultiTableInputFormatBase.java | 4 +-
.../hbase/mapreduce/TestTableInputFormatBase.java | 10 +-
.../main/java/org/apache/hadoop/hbase/Server.java | 9 -
.../hbase/backup/example/ZKTableArchiveClient.java | 11 +-
.../hbase/client/AsyncClusterConnection.java | 2 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 10 +-
.../master/assignment/AssignmentManagerUtil.java | 20 +--
.../quotas/RegionServerSpaceQuotaManager.java | 2 +-
.../DisableTableViolationPolicyEnforcement.java | 3 +-
.../hadoop/hbase/regionserver/HRegionServer.java | 39 ++---
.../regionserver/DumpReplicationQueues.java | 4 +-
.../regionserver/ReplicationSyncUp.java | 8 +-
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 39 ++---
.../apache/hadoop/hbase/util/HBaseFsckRepair.java | 1 -
.../apache/hadoop/hbase/util/MultiHConnection.java | 15 +-
.../apache/hadoop/hbase/util/RegionSplitter.java | 15 +-
.../main/resources/hbase-webapps/master/table.jsp | 2 +-
.../java/org/apache/hadoop/hbase/HBaseCluster.java | 23 ---
.../org/apache/hadoop/hbase/MiniHBaseCluster.java | 24 ---
.../hadoop/hbase/MockRegionServerServices.java | 10 +-
.../example/TestZooKeeperTableArchiveClient.java | 6 +-
.../hbase/client/HConnectionTestingUtility.java | 19 +--
.../org/apache/hadoop/hbase/client/TestAdmin1.java | 29 ++--
.../org/apache/hadoop/hbase/client/TestAdmin2.java | 2 +-
.../hbase/client/TestAsyncTableAdminApi.java | 2 +-
.../apache/hadoop/hbase/client/TestCISleep.java | 12 +-
.../hbase/client/TestConnectionImplementation.java | 5 +-
.../hadoop/hbase/client/TestFromClientSide3.java | 6 +-
.../client/TestMetaTableAccessorNoCluster.java | 10 +-
.../hadoop/hbase/client/TestMultiParallel.java | 5 +-
.../hadoop/hbase/client/TestReplicasClient.java | 22 ++-
.../hbase/client/TestSeparateClientZKCluster.java | 2 +-
.../hbase/client/TestShortCircuitConnection.java | 2 +-
.../hbase/master/MockNoopMasterServices.java | 8 +-
.../hadoop/hbase/master/MockRegionServer.java | 8 +-
.../hbase/master/TestActiveMasterManager.java | 9 +-
.../hbase/master/TestClockSkewDetection.java | 13 +-
.../hadoop/hbase/master/TestMasterNoCluster.java | 13 +-
.../master/assignment/MockMasterServices.java | 6 +-
.../hbase/master/cleaner/TestHFileCleaner.java | 9 +-
.../hbase/master/cleaner/TestHFileLinkCleaner.java | 10 +-
.../hbase/master/cleaner/TestLogsCleaner.java | 8 +-
.../cleaner/TestReplicationHFileCleaner.java | 9 +-
.../procedure/MasterProcedureTestingUtility.java | 4 +-
.../regionserver/TestHRegionServerBulkLoad.java | 35 +---
.../hbase/regionserver/TestHeapMemoryManager.java | 9 +-
.../hbase/regionserver/TestSplitLogWorker.java | 9 +-
.../hadoop/hbase/regionserver/TestWALLockup.java | 8 +-
.../replication/TestReplicationTrackerZKImpl.java | 9 +-
.../regionserver/TestReplicationSourceManager.java | 10 +-
.../security/token/TestTokenAuthentication.java | 9 +-
.../hadoop/hbase/util/BaseTestHBaseFsck.java | 13 +-
.../org/apache/hadoop/hbase/util/MockServer.java | 10 +-
.../hadoop/hbase/util/MultiThreadedAction.java | 6 +-
.../apache/hadoop/hbase/util/TestHBaseFsckMOB.java | 3 +-
.../hbase/thrift2/client/ThriftConnection.java | 2 -
.../hadoop/hbase/thrift2/TestThriftConnection.java | 4 +-
80 files changed, 375 insertions(+), 765 deletions(-)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index 922611b..6d70219 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -63,7 +63,7 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
-public class BufferedMutatorImpl implements BufferedMutator {
+class BufferedMutatorImpl implements BufferedMutator {
private static final Logger LOG = LoggerFactory.getLogger(BufferedMutatorImpl.class);
@@ -95,7 +95,8 @@ public class BufferedMutatorImpl implements BufferedMutator {
private final AsyncProcess ap;
@VisibleForTesting
- BufferedMutatorImpl(ClusterConnection conn, BufferedMutatorParams params, AsyncProcess ap) {
+ BufferedMutatorImpl(ConnectionImplementation conn, BufferedMutatorParams params,
+ AsyncProcess ap) {
if (conn == null || conn.isClosed()) {
throw new IllegalArgumentException("Connection is null or closed.");
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java
index 6ad9254..f81018e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java
@@ -39,8 +39,10 @@ abstract class CancellableRegionServerCallable<T> extends ClientServiceCallable<
Cancellable {
private final RetryingTimeTracker tracker;
private final int rpcTimeout;
- CancellableRegionServerCallable(Connection connection, TableName tableName, byte[] row,
- RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, int priority) {
+
+ CancellableRegionServerCallable(ConnectionImplementation connection, TableName tableName,
+ byte[] row, RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker,
+ int priority) {
super(connection, tableName, row, rpcController, priority);
this.rpcTimeout = rpcTimeout;
this.tracker = tracker;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
index 67ba838..c7006a8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
@@ -31,12 +31,12 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
* @param <T>
*/
@InterfaceAudience.Private
-public abstract class ClientServiceCallable<T> extends
- RegionServerCallable<T, ClientProtos.ClientService.BlockingInterface> {
+public abstract class ClientServiceCallable<T>
+ extends RegionServerCallable<T, ClientProtos.ClientService.BlockingInterface> {
- public ClientServiceCallable(Connection connection, TableName tableName, byte[] row,
+ public ClientServiceCallable(ConnectionImplementation connection, TableName tableName, byte[] row,
RpcController rpcController, int priority) {
- super((ConnectionImplementation) connection, tableName, row, rpcController, priority);
+ super(connection, tableName, row, rpcController, priority);
}
@Override
@@ -46,12 +46,12 @@ public abstract class ClientServiceCallable<T> extends
// Below here are simple methods that contain the stub and the rpcController.
protected ClientProtos.GetResponse doGet(ClientProtos.GetRequest request)
- throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
+ throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
return getStub().get(getRpcController(), request);
}
protected ClientProtos.MutateResponse doMutate(ClientProtos.MutateRequest request)
- throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
+ throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
return getStub().mutate(getRpcController(), request);
}
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
deleted file mode 100644
index 092bd24..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.yetus.audience.InterfaceAudience;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-
-/** Internal methods on Connection that should not be used by user code. */
-@InterfaceAudience.Private
-// NOTE: Although this class is public, this class is meant to be used directly from internal
-// classes and unit tests only.
-public interface ClusterConnection extends Connection {
-
- /**
- * Key for configuration in Configuration whose value is the class we implement making a
- * new Connection instance.
- */
- String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";
-
- /**
- * @return - true if the master server is running
- * @deprecated this has been deprecated without a replacement
- */
- @Deprecated
- boolean isMasterRunning()
- throws MasterNotRunningException, ZooKeeperConnectionException;
-
- /**
- * Use this api to check if the table has been created with the specified number of
- * splitkeys which was used while creating the given table.
- * Note : If this api is used after a table's region gets splitted, the api may return
- * false.
- * @param tableName
- * tableName
- * @param splitKeys
- * splitKeys used while creating table
- * @throws IOException
- * if a remote or network exception occurs
- */
- boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws
- IOException;
-
- /**
- * A table that isTableEnabled == false and isTableDisabled == false
- * is possible. This happens when a table has a lot of regions
- * that must be processed.
- * @param tableName table name
- * @return true if the table is enabled, false otherwise
- * @throws IOException if a remote or network exception occurs
- */
- boolean isTableEnabled(TableName tableName) throws IOException;
-
- /**
- * @param tableName table name
- * @return true if the table is disabled, false otherwise
- * @throws IOException if a remote or network exception occurs
- */
- boolean isTableDisabled(TableName tableName) throws IOException;
-
- /**
- * Retrieve TableState, represent current table state.
- * @param tableName table state for
- * @return state of the table
- */
- TableState getTableState(TableName tableName) throws IOException;
-
- /**
- * Returns a {@link MasterKeepAliveConnection} to the active master
- */
- MasterKeepAliveConnection getMaster() throws IOException;
-
- /**
- * Get the admin service for master.
- */
- AdminService.BlockingInterface getAdminForMaster() throws IOException;
-
- /**
- * Establishes a connection to the region server at the specified address.
- * @param serverName the region server to connect to
- * @return proxy for HRegionServer
- * @throws IOException if a remote or network exception occurs
- */
- AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException;
-
- /**
- * Establishes a connection to the region server at the specified address, and returns
- * a region client protocol.
- *
- * @param serverName the region server to connect to
- * @return ClientProtocol proxy for RegionServer
- * @throws IOException if a remote or network exception occurs
- *
- */
- ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException;
-
- /**
- * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration.
- */
- NonceGenerator getNonceGenerator();
-
- /**
- * @return Default AsyncProcess associated with this connection.
- */
- AsyncProcess getAsyncProcess();
-
- /**
- * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}.
- * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be
- * intercepted with the configured {@link RetryingCallerInterceptor}
- * @param conf configuration
- * @return RpcRetryingCallerFactory
- */
- RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf);
-
- /**
- * @return Connection's RpcRetryingCallerFactory instance
- */
- RpcRetryingCallerFactory getRpcRetryingCallerFactory();
-
- /**
- * @return Connection's RpcControllerFactory instance
- */
- RpcControllerFactory getRpcControllerFactory();
-
- /**
- * @return a ConnectionConfiguration object holding parsed configuration values
- */
- ConnectionConfiguration getConnectionConfiguration();
-
- /**
- * @return the current statistics tracker associated with this connection
- */
- ServerStatisticTracker getStatisticsTracker();
-
- /**
- * @return the configured client backoff policy
- */
- ClientBackoffPolicy getBackoffPolicy();
-
- /**
- * @return the MetricsConnection instance associated with this connection.
- */
- MetricsConnection getConnectionMetrics();
-
- /**
- * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
- * supports cell blocks.
- */
- boolean hasCellBlockSupport();
-
- /**
- * @return the number of region servers that are currently running
- * @throws IOException if a remote or network exception occurs
- */
- int getCurrentNrHRS() throws IOException;
-}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index b984a99..ceef356 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -212,7 +212,7 @@ public class ConnectionFactory {
*/
public static Connection createConnection(Configuration conf, ExecutorService pool,
final User user) throws IOException {
- String className = conf.get(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
+ String className = conf.get(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
ConnectionImplementation.class.getName());
Class<?> clazz;
try {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 49fa81b..de377c7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -152,7 +152,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Updat
value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
justification="Access to the conncurrent hash map is under a lock so should be fine.")
@InterfaceAudience.Private
-class ConnectionImplementation implements ClusterConnection, Closeable {
+class ConnectionImplementation implements Connection, Closeable {
public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server";
private static final Logger LOG = LoggerFactory.getLogger(ConnectionImplementation.class);
@@ -354,9 +354,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
*/
@VisibleForTesting
static NonceGenerator injectNonceGeneratorForTesting(
- ClusterConnection conn, NonceGenerator cnm) {
- ConnectionImplementation connImpl = (ConnectionImplementation)conn;
- NonceGenerator ng = connImpl.getNonceGenerator();
+ ConnectionImplementation conn, NonceGenerator cnm) {
+ NonceGenerator ng = conn.getNonceGenerator();
LOG.warn("Nonce generator is being replaced by test code for "
+ cnm.getClass().getName());
nonceGenerator = cnm;
@@ -456,7 +455,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
}), rpcControllerFactory);
}
- @Override
+ /**
+ * @return the MetricsConnection instance associated with this connection.
+ */
public MetricsConnection getConnectionMetrics() {
return this.metrics;
}
@@ -600,7 +601,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
* @deprecated this has been deprecated without a replacement
*/
@Deprecated
- @Override
public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException {
// When getting the master connection, we check it's running,
// so if there is no exception, it means we've been able to get a
@@ -628,18 +628,39 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row);
}
-
- @Override
+ /**
+ * A table that isTableEnabled == false and isTableDisabled == false
+ * is possible. This happens when a table has a lot of regions
+ * that must be processed.
+ * @param tableName table name
+ * @return true if the table is enabled, false otherwise
+ * @throws IOException if a remote or network exception occurs
+ */
public boolean isTableEnabled(TableName tableName) throws IOException {
return getTableState(tableName).inStates(TableState.State.ENABLED);
}
- @Override
+ /**
+ * @param tableName table name
+ * @return true if the table is disabled, false otherwise
+ * @throws IOException if a remote or network exception occurs
+ */
public boolean isTableDisabled(TableName tableName) throws IOException {
return getTableState(tableName).inStates(TableState.State.DISABLED);
}
- @Override
+ /**
+ * Use this api to check if the table has been created with the specified number of
+ * splitkeys which was used while creating the given table.
+ * Note : If this api is used after a table's region gets splitted, the api may return
+ * false.
+ * @param tableName
+ * tableName
+ * @param splitKeys
+ * splitKeys used while creating table
+ * @throws IOException
+ * if a remote or network exception occurs
+ */
public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys)
throws IOException {
checkClosed();
@@ -809,15 +830,14 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
}
/**
- *
- * @param tableName table to get regions of
- * @param row the row
- * @param useCache Should we use the cache to retrieve the region information.
- * @param retry do we retry
- * @param replicaId the replicaId for the region
- * @return region locations for this row.
- * @throws IOException if IO failure occurs
- */
+ * @param tableName table to get regions of
+ * @param row the row
+ * @param useCache Should we use the cache to retrieve the region information.
+ * @param retry do we retry
+ * @param replicaId the replicaId for the region
+ * @return region locations for this row.
+ * @throws IOException if IO failure occurs
+ */
RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
boolean retry, int replicaId) throws IOException {
checkClosed();
@@ -1048,6 +1068,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
metaCache.clearCache(serverName);
}
+
+ /**
+ * Allows flushing the region cache.
+ */
@Override
public void clearRegionLocationCache() {
metaCache.clearCache();
@@ -1258,12 +1282,19 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
}
}
- @Override
+ /**
+ * Get the admin service for master.
+ */
public AdminProtos.AdminService.BlockingInterface getAdminForMaster() throws IOException {
return getAdmin(get(registry.getMasterAddress()));
}
- @Override
+ /**
+ * Establishes a connection to the region server at the specified address.
+ * @param serverName the region server to connect to
+ * @return proxy for HRegionServer
+ * @throws IOException if a remote or network exception occurs
+ */
public AdminProtos.AdminService.BlockingInterface getAdmin(ServerName serverName)
throws IOException {
checkClosed();
@@ -1279,7 +1310,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
});
}
- @Override
+ /**
+ * Establishes a connection to the region server at the specified address, and returns a region
+ * client protocol.
+ * @param serverName the region server to connect to
+ * @return ClientProtocol proxy for RegionServer
+ * @throws IOException if a remote or network exception occurs
+ */
public BlockingInterface getClient(ServerName serverName) throws IOException {
checkClosed();
if (isDeadServer(serverName)) {
@@ -1289,14 +1326,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
serverName, this.hostnamesCanChange);
return (ClientProtos.ClientService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> {
BlockingRpcChannel channel =
- this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout);
+ this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout);
return ClientProtos.ClientService.newBlockingStub(channel);
});
}
final MasterServiceState masterServiceState = new MasterServiceState(this);
- @Override
public MasterKeepAliveConnection getMaster() throws IOException {
return getKeepAliveMasterService();
}
@@ -1927,6 +1963,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
cacheLocation(hri.getTable(), source, newHrl);
}
+ /**
+ * Deletes cached locations for the specific region.
+ * @param location The location object for the region, to be purged from cache.
+ */
void deleteCachedRegionLocation(final HRegionLocation location) {
metaCache.clearCache(location);
}
@@ -2005,17 +2045,23 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
metaCache.clearCache(regionInfo);
}
- @Override
+ /**
+ * @return Default AsyncProcess associated with this connection.
+ */
public AsyncProcess getAsyncProcess() {
return asyncProcess;
}
- @Override
+ /**
+ * @return the current statistics tracker associated with this connection
+ */
public ServerStatisticTracker getStatisticsTracker() {
return this.stats;
}
- @Override
+ /**
+ * @return the configured client backoff policy
+ */
public ClientBackoffPolicy getBackoffPolicy() {
return this.backoffPolicy;
}
@@ -2051,7 +2097,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
return this.aborted;
}
- @Override
+ /**
+ * @return the number of region servers that are currently running
+ * @throws IOException if a remote or network exception occurs
+ */
public int getCurrentNrHRS() throws IOException {
return get(this.registry.getCurrentNrHRS());
}
@@ -2094,12 +2143,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
close();
}
- @Override
+ /**
+ * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration.
+ */
public NonceGenerator getNonceGenerator() {
return nonceGenerator;
}
- @Override
+ /**
+ * Retrieve TableState, represent current table state.
+ * @param tableName table state for
+ * @return state of the table
+ */
public TableState getTableState(TableName tableName) throws IOException {
checkClosed();
TableState tableState = MetaTableAccessor.getTableState(this, tableName);
@@ -2109,28 +2164,43 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
return tableState;
}
- @Override
+ /**
+ * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}.
+ * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be
+ * intercepted with the configured {@link RetryingCallerInterceptor}
+ * @param conf configuration
+ * @return RpcRetryingCallerFactory
+ */
public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) {
return RpcRetryingCallerFactory
.instantiate(conf, this.interceptor, this.getStatisticsTracker());
}
- @Override
+ /**
+ * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
+ * supports cell blocks.
+ */
public boolean hasCellBlockSupport() {
return this.rpcClient.hasCellBlockSupport();
}
- @Override
+ /**
+ * @return a ConnectionConfiguration object holding parsed configuration values
+ */
public ConnectionConfiguration getConnectionConfiguration() {
return this.connectionConfig;
}
- @Override
+ /**
+ * @return Connection's RpcRetryingCallerFactory instance
+ */
public RpcRetryingCallerFactory getRpcRetryingCallerFactory() {
return this.rpcCallerFactory;
}
- @Override
+ /**
+ * @return Connection's RpcControllerFactory instance
+ */
public RpcControllerFactory getRpcControllerFactory() {
return this.rpcControllerFactory;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 4ec7e32..fe1dd3e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -80,6 +80,12 @@ public final class ConnectionUtils {
private static final Logger LOG = LoggerFactory.getLogger(ConnectionUtils.class);
+ /**
+ * Key for configuration in Configuration whose value is the class we implement making a new
+ * Connection instance.
+ */
+ public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";
+
private ConnectionUtils() {
}
@@ -109,7 +115,7 @@ public final class ConnectionUtils {
* @param cnm Replaces the nonce generator used, for testing.
* @return old nonce generator.
*/
- public static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn,
+ public static NonceGenerator injectNonceGeneratorForTesting(ConnectionImplementation conn,
NonceGenerator cnm) {
return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm);
}
@@ -186,7 +192,7 @@ public final class ConnectionUtils {
* @return an short-circuit connection.
* @throws IOException if IO failure occurred
*/
- public static ClusterConnection createShortCircuitConnection(final Configuration conf,
+ public static ConnectionImplementation createShortCircuitConnection(final Configuration conf,
ExecutorService pool, User user, final ServerName serverName,
final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
throws IOException {
@@ -202,7 +208,7 @@ public final class ConnectionUtils {
*/
@VisibleForTesting
public static void setupMasterlessConnection(Configuration conf) {
- conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
+ conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
}
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index f553960..55b83ee 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -446,7 +446,7 @@ public class HBaseAdmin implements Admin {
/** @return Connection used by this object. */
@Override
- public Connection getConnection() {
+ public ConnectionImplementation getConnection() {
return connection;
}
@@ -485,23 +485,24 @@ public class HBaseAdmin implements Admin {
});
}
- static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
- RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
- int operationTimeout, int rpcTimeout) throws IOException {
+ static TableDescriptor getTableDescriptor(final TableName tableName,
+ ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory,
+ final RpcControllerFactory rpcControllerFactory, int operationTimeout, int rpcTimeout)
+ throws IOException {
if (tableName == null) return null;
TableDescriptor td =
- executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
- @Override
- protected TableDescriptor rpcCall() throws Exception {
- GetTableDescriptorsRequest req =
+ executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
+ @Override
+ protected TableDescriptor rpcCall() throws Exception {
+ GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableName);
- GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
- if (!htds.getTableSchemaList().isEmpty()) {
- return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
+ GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
+ if (!htds.getTableSchemaList().isEmpty()) {
+ return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
+ }
+ return null;
}
- return null;
- }
- }, rpcCallerFactory, operationTimeout, rpcTimeout);
+ }, rpcCallerFactory, operationTimeout, rpcTimeout);
if (td != null) {
return td;
}
@@ -2027,8 +2028,8 @@ public class HBaseAdmin implements Admin {
// Check ZK first.
// If the connection exists, we may have a connection to ZK that does not work anymore
- try (ClusterConnection connection =
- (ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) {
+ try (ConnectionImplementation connection =
+ (ConnectionImplementation) ConnectionFactory.createConnection(copyOfConf)) {
// can throw MasterNotRunningException
connection.isMasterRunning();
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java
index 79a75d0..c77a736 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java
@@ -22,10 +22,15 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.stream.Collectors;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
@@ -33,16 +38,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService.BlockingInterface;
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-
-import org.apache.yetus.audience.InterfaceAudience;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
- * Use {@link ClusterConnection#getHbck()} to obtain an instance of {@link Hbck} instead of
+ * Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of
* constructing an HBaseHbck directly.
*
* <p>Connection should be an <i>unmanaged</i> connection obtained via
@@ -57,7 +55,6 @@ import org.slf4j.LoggerFactory;
* by each thread. Pooling or caching of the instance is not recommended.</p>
*
* @see ConnectionFactory
- * @see ClusterConnection
* @see Hbck
*/
@InterfaceAudience.Private
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 4300f6e..ee6247b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -153,7 +153,6 @@ public class HTable implements Table {
* @param rpcControllerFactory The RPC controller factory
* @param pool ExecutorService to be used.
*/
- @InterfaceAudience.Private
protected HTable(final ConnectionImplementation connection,
final TableBuilderBase builder,
final RpcRetryingCallerFactory rpcCallerFactory,
@@ -449,22 +448,18 @@ public class HTable implements Table {
}
public static <R> void doBatchWithCallback(List<? extends Row> actions, Object[] results,
- Callback<R> callback, ClusterConnection connection, ExecutorService pool, TableName tableName)
- throws InterruptedIOException, RetriesExhaustedWithDetailsException {
- int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout();
+ Callback<R> callback, Connection connection, ExecutorService pool, TableName tableName)
+ throws InterruptedIOException, RetriesExhaustedWithDetailsException {
+ ConnectionImplementation connImpl = (ConnectionImplementation) connection;
+ int operationTimeout = connImpl.getConnectionConfiguration().getOperationTimeout();
int writeTimeout = connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
- connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
- HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
- AsyncProcessTask<R> task = AsyncProcessTask.newBuilder(callback)
- .setPool(pool)
- .setTableName(tableName)
- .setRowAccess(actions)
- .setResults(results)
- .setOperationTimeout(operationTimeout)
- .setRpcTimeout(writeTimeout)
- .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
- .build();
- AsyncRequestFuture ars = connection.getAsyncProcess().submit(task);
+ connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+ HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+ AsyncProcessTask<R> task =
+ AsyncProcessTask.newBuilder(callback).setPool(pool).setTableName(tableName)
+ .setRowAccess(actions).setResults(results).setOperationTimeout(operationTimeout)
+ .setRpcTimeout(writeTimeout).setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build();
+ AsyncRequestFuture ars = connImpl.getAsyncProcess().submit(task);
ars.waitUntilDone();
if (ars.hasError()) {
throw ars.getErrors();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java
index 76643e6..249cd87 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java
@@ -21,7 +21,6 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
-
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
@@ -31,7 +30,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
/**
- * Hbck fixup tool APIs. Obtain an instance from {@link ClusterConnection#getHbck()} and call
+ * Hbck fixup tool APIs. Obtain an instance from {@link Connection#getHbck()} and call
* {@link #close()} when done.
* <p>WARNING: the below methods can damage the cluster. It may leave the cluster in an
* indeterminate state, e.g. region not assigned, or some hdfs files left behind. After running
@@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
* procedures to get regions back online. DO AT YOUR OWN RISK. For experienced users only.
*
* @see ConnectionFactory
- * @see ClusterConnection
* @since 2.0.2, 2.1.1
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
index 7ae9731..04da2eb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
@@ -43,12 +43,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
*/
@InterfaceAudience.Private
abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
- protected final ClusterConnection connection;
+ protected final ConnectionImplementation connection;
protected MasterKeepAliveConnection master;
private final HBaseRpcController rpcController;
- MasterCallable(final Connection connection, final RpcControllerFactory rpcConnectionFactory) {
- this.connection = (ClusterConnection) connection;
+ MasterCallable(ConnectionImplementation connection,
+ final RpcControllerFactory rpcConnectionFactory) {
+ this.connection = connection;
this.rpcController = rpcConnectionFactory.newController();
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index bfc161e..bf557fa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -31,15 +30,16 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
* Callable that handles the <code>multi</code> method call going against a single
@@ -52,7 +52,7 @@ class MultiServerCallable extends CancellableRegionServerCallable<MultiResponse>
private MultiAction multiAction;
private boolean cellBlock;
- MultiServerCallable(final ClusterConnection connection, final TableName tableName,
+ MultiServerCallable(final ConnectionImplementation connection, final TableName tableName,
final ServerName location, final MultiAction multi, RpcController rpcController,
int rpcTimeout, RetryingTimeTracker tracker, int priority) {
super(connection, tableName, null, rpcController, rpcTimeout, tracker, priority);
@@ -141,7 +141,7 @@ class MultiServerCallable extends CancellableRegionServerCallable<MultiResponse>
private boolean isCellBlock() {
// This is not exact -- the configuration could have changed on us after connection was set up
// but it will do for now.
- ClusterConnection conn = getConnection();
+ ConnectionImplementation conn = getConnection();
return conn.hasCellBlockSupport();
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java
index 2da8422..69f4f4a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java
@@ -46,8 +46,8 @@ public abstract class NoncedRegionServerCallable<T> extends ClientServiceCallabl
* @param tableName Table name to which <code>row</code> belongs.
* @param row The row we want in <code>tableName</code>.
*/
- public NoncedRegionServerCallable(Connection connection, TableName tableName, byte [] row,
- HBaseRpcController rpcController, int priority) {
+ public NoncedRegionServerCallable(ConnectionImplementation connection, TableName tableName,
+ byte[] row, HBaseRpcController rpcController, int priority) {
super(connection, tableName, row, rpcController, priority);
this.nonce = getConnection().getNonceGenerator().newNonce();
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
index 448302c..80371b7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
@@ -46,7 +46,7 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel {
private static final Logger LOG = LoggerFactory.getLogger(RegionCoprocessorRpcChannel.class);
private final TableName table;
private final byte [] row;
- private final ClusterConnection conn;
+ private final ConnectionImplementation conn;
private byte[] lastRegion;
private final int operationTimeout;
private final RpcRetryingCallerFactory rpcCallerFactory;
@@ -57,7 +57,7 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel {
* @param table to connect to
* @param row to locate region with
*/
- RegionCoprocessorRpcChannel(ClusterConnection conn, TableName table, byte[] row) {
+ RegionCoprocessorRpcChannel(ConnectionImplementation conn, TableName table, byte[] row) {
this.table = table;
this.row = row;
this.conn = conn;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index 264304e..009544c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.client;
import java.io.IOException;
-
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -27,11 +26,12 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
/**
* Implementations make a RPC call against a RegionService via a protobuf Service.
@@ -75,12 +75,12 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
* @param tableName Table name to which <code>row</code> belongs.
* @param row The row we want in <code>tableName</code>.
*/
- public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row,
+ public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte[] row,
RpcController rpcController) {
this(connection, tableName, row, rpcController, HConstants.NORMAL_QOS);
}
- public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row,
+ public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte[] row,
RpcController rpcController, int priority) {
super();
this.connection = connection;
@@ -160,7 +160,7 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
}
/**
- * @return {@link ClusterConnection} instance used by this Callable.
+ * @return {@link ConnectionImplementation} instance used by this Callable.
*/
protected ConnectionImplementation getConnection() {
return this.connection;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
index 34c24c0..2ed037e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
@@ -37,13 +37,6 @@ public class ReversedClientScanner extends ClientScanner {
/**
* Create a new ReversibleClientScanner for the specified table Note that the passed
* {@link Scan}'s start row maybe changed.
- * @param conf
- * @param scan
- * @param tableName
- * @param connection
- * @param pool
- * @param primaryOperationTimeout
- * @throws IOException
*/
public ReversedClientScanner(Configuration conf, Scan scan, TableName tableName,
ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
index 30e541c..6a325b2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
@@ -52,8 +52,8 @@ public class ReversedScannerCallable extends ScannerCallable {
* @param rpcFactory to create an {@link com.google.protobuf.RpcController} to talk to the
* regionserver
*/
- public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
- ScanMetrics scanMetrics, RpcControllerFactory rpcFactory) {
+ public ReversedScannerCallable(ConnectionImplementation connection, TableName tableName,
+ Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory) {
super(connection, tableName, scan, scanMetrics, rpcFactory);
}
@@ -66,8 +66,8 @@ public class ReversedScannerCallable extends ScannerCallable {
* regionserver
* @param replicaId the replica id
*/
- public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
- ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) {
+ public ReversedScannerCallable(ConnectionImplementation connection, TableName tableName,
+ Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) {
super(connection, tableName, scan, scanMetrics, rpcFactory, replicaId);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index 45b74ef..bf7135f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -101,23 +101,24 @@ public class ScannerCallable extends ClientServiceCallable<Result[]> {
* @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect
* metrics
* @param rpcControllerFactory factory to use when creating
- * {@link com.google.protobuf.RpcController}
+ * {@link com.google.protobuf.RpcController}
*/
- public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
+ public ScannerCallable(ConnectionImplementation connection, TableName tableName, Scan scan,
ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) {
this(connection, tableName, scan, scanMetrics, rpcControllerFactory, 0);
}
+
/**
- *
* @param connection
* @param tableName
* @param scan
* @param scanMetrics
* @param id the replicaId
*/
- public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
+ public ScannerCallable(ConnectionImplementation connection, TableName tableName, Scan scan,
ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory, int id) {
- super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(), scan.getPriority());
+ super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(),
+ scan.getPriority());
this.id = id;
this.scan = scan;
this.scanMetrics = scanMetrics;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
index 27e5f87..db956ce 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
@@ -76,7 +76,7 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
public ScannerCallableWithReplicas(TableName tableName, ConnectionImplementation cConnection,
ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan,
int retries, int scannerTimeout, int caching, Configuration conf,
- RpcRetryingCaller<Result []> caller) {
+ RpcRetryingCaller<Result[]> caller) {
this.currentScannerCallable = baseCallable;
this.cConnection = cConnection;
this.pool = pool;
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index bd6f03c..02e4c46 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -1124,11 +1124,8 @@ public class TestAsyncProcess {
1, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS);
}
- private void checkPeriodicFlushParameters(ClusterConnection conn,
- MyAsyncProcess ap,
- long setTO, long expectTO,
- long setTT, long expectTT
- ) {
+ private void checkPeriodicFlushParameters(ConnectionImplementation conn, MyAsyncProcess ap,
+ long setTO, long expectTO, long setTT, long expectTT) {
BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
// The BufferedMutatorParams does nothing with the value
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
index f0375e2..647ea32 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java
@@ -44,8 +44,7 @@ public class TestBufferedMutator {
public TestName name = new TestName();
/**
- * My BufferedMutator.
- * Just to prove that I can insert a BM other than default.
+ * My BufferedMutator. Just to prove that I can insert a BM other than default.
*/
public static class MyBufferedMutator extends BufferedMutatorImpl {
MyBufferedMutator(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory,
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 5a5d6d0..cb60695 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
+import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
@@ -28,7 +29,6 @@ import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterManager.ServiceType;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -37,10 +37,6 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-
/**
* Manages the interactions with an already deployed distributed cluster (as opposed to
* a pseudo-distributed, or mini/local cluster). This is used by integration and system tests.
@@ -100,18 +96,6 @@ public class DistributedHBaseCluster extends HBaseCluster {
}
@Override
- public AdminProtos.AdminService.BlockingInterface getAdminProtocol(ServerName serverName)
- throws IOException {
- return ((ClusterConnection)this.connection).getAdmin(serverName);
- }
-
- @Override
- public ClientProtos.ClientService.BlockingInterface getClientProtocol(ServerName serverName)
- throws IOException {
- return ((ClusterConnection)this.connection).getClient(serverName);
- }
-
- @Override
public void startRegionServer(String hostname, int port) throws IOException {
LOG.info("Starting RS on: " + hostname);
clusterManager.start(ServiceType.HBASE_REGIONSERVER, hostname, port);
@@ -262,13 +246,6 @@ public class DistributedHBaseCluster extends HBaseCluster {
throw new IOException("did timeout waiting for service to start:" + serverName);
}
-
- @Override
- public MasterService.BlockingInterface getMasterAdminService()
- throws IOException {
- return ((ClusterConnection)this.connection).getMaster();
- }
-
@Override
public void startMaster(String hostname, int port) throws IOException {
LOG.info("Starting Master on: " + hostname + ":" + port);
@@ -297,7 +274,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
long start = System.currentTimeMillis();
while (System.currentTimeMillis() - start < timeout) {
try {
- getMasterAdminService();
+ connection.getAdmin().getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION));
return true;
} catch (MasterNotRunningException m) {
LOG.warn("Master not started yet " + m);
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
index 906abca..eff26d7 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java
@@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@@ -98,7 +98,7 @@ public class TestMultiTableInputFormatBase {
// canned responses.
JobContext mockedJobContext = Mockito.mock(JobContext.class);
Configuration c = HBaseConfiguration.create();
- c.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName());
+ c.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName());
Mockito.when(mockedJobContext.getConfiguration()).thenReturn(c);
// Invent a bunch of scans. Have each Scan go against a different table so a good spread.
List<Scan> scans = new ArrayList<>();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
index 4436ee1..944bd10 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java
@@ -17,9 +17,9 @@
*/
package org.apache.hadoop.hbase.mapreduce;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.anyBoolean;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
@@ -90,7 +90,7 @@ public class TestTableInputFormatBase {
public void testNonSuccessiveSplitsAreNotMerged() throws IOException {
JobContext context = mock(JobContext.class);
Configuration conf = HBaseConfiguration.create();
- conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
+ conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
ConnectionForMergeTesting.class.getName());
conf.set(TableInputFormat.INPUT_TABLE, "testTable");
conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index c33d5af..e0e95df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.AsyncConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
@@ -55,14 +54,6 @@ public interface Server extends Abortable, Stoppable {
Connection createConnection(Configuration conf) throws IOException;
/**
- * Returns a reference to the servers' cluster connection. Prefer {@link #getConnection()}.
- *
- * Important note: this method returns a reference to Connection which is managed
- * by Server itself, so callers must NOT attempt to close connection obtained.
- */
- ClusterConnection getClusterConnection();
-
- /**
* Returns a reference to the servers' async connection.
* <p/>
* Important note: this method returns a reference to Connection which is managed by Server
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
index 142788e..af0d560 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
@@ -18,14 +18,13 @@
package org.apache.hadoop.hbase.backup.example;
import java.io.IOException;
-
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
/**
@@ -36,9 +35,9 @@ public class ZKTableArchiveClient extends Configured {
/** Configuration key for the archive node. */
private static final String ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY = "zookeeper.znode.hfile.archive";
- private ClusterConnection connection;
+ private Connection connection;
- public ZKTableArchiveClient(Configuration conf, ClusterConnection connection) {
+ public ZKTableArchiveClient(Configuration conf, Connection connection) {
super(conf);
this.connection = connection;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index c3f8f8b..45dc8be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -64,7 +64,7 @@ public interface AsyncClusterConnection extends AsyncConnection {
List<Entry> entries, int replicaId, int numRetries, long operationTimeoutNs);
/**
- * Return all the replicas for a region. Used for regiong replica replication.
+ * Return all the replicas for a region. Used for region replica replication.
*/
CompletableFuture<RegionLocations> getRegionLocations(TableName tableName, byte[] row,
boolean reload);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 10f3632..204e380 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1170,7 +1170,7 @@ public class HMaster extends HRegionServer implements MasterServices {
if (QuotaUtil.isQuotaEnabled(conf)) {
// Create the quota snapshot notifier
spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
- spaceQuotaSnapshotNotifier.initialize(getClusterConnection());
+ spaceQuotaSnapshotNotifier.initialize(getConnection());
this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
// Start the chore to read the region FS space reports and act on them
getChoreService().scheduleChore(quotaObserverChore);
@@ -1267,7 +1267,7 @@ public class HMaster extends HRegionServer implements MasterServices {
*/
private boolean waitForNamespaceOnline() throws InterruptedException, IOException {
TableState nsTableState =
- MetaTableAccessor.getTableState(getClusterConnection(), TableName.NAMESPACE_TABLE_NAME);
+ MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME);
if (nsTableState == null || nsTableState.isDisabled()) {
// this means we have already migrated the data and disabled or deleted the namespace table,
// or this is a new deploy which does not have a namespace table from the beginning.
@@ -1857,7 +1857,7 @@ public class HMaster extends HRegionServer implements MasterServices {
List<NormalizationPlan> plans = this.normalizer.computePlanForTable(table);
if (plans != null) {
for (NormalizationPlan plan : plans) {
- plan.execute(clusterConnection.getAdmin());
+ plan.execute(connection.getAdmin());
if (plan.getType() == PlanType.SPLIT) {
splitPlanCount++;
} else if (plan.getType() == PlanType.MERGE) {
@@ -3058,8 +3058,8 @@ public class HMaster extends HRegionServer implements MasterServices {
// this is what we want especially if the Master is in startup phase doing call outs to
// hbase:meta, etc. when cluster is down. Without this connection close, we'd have to wait on
// the rpc to timeout.
- if (this.clusterConnection != null) {
- this.clusterConnection.close();
+ if (this.connection != null) {
+ this.connection.close();
}
if (this.asyncClusterConnection != null) {
this.asyncClusterConnection.close();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
index d401141..8666743 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
@@ -28,19 +28,16 @@ import java.util.stream.Stream;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.hbase.wal.WALSplitUtil;
import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
@@ -66,22 +63,15 @@ final class AssignmentManagerUtil {
static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
final ServerName regionLocation, final RegionInfo hri, boolean includeBestSplitRow)
throws IOException {
- // TODO: There is no timeout on this controller. Set one!
- HBaseRpcController controller =
- env.getMasterServices().getClusterConnection().getRpcControllerFactory().newController();
- final AdminService.BlockingInterface admin =
- env.getMasterServices().getClusterConnection().getAdmin(regionLocation);
+ AsyncRegionServerAdmin admin =
+ env.getMasterServices().getAsyncClusterConnection().getRegionServerAdmin(regionLocation);
GetRegionInfoRequest request = null;
if (includeBestSplitRow) {
request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName(), false, true);
} else {
request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName());
}
- try {
- return admin.getRegionInfo(controller, request);
- } catch (ServiceException e) {
- throw ProtobufUtil.handleRemoteException(e);
- }
+ return FutureUtils.get(admin.getRegionInfo(request));
}
private static void lock(List<RegionStateNode> regionNodes) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
index 3972700..b9797bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
@@ -90,7 +90,7 @@ public class RegionServerSpaceQuotaManager {
return;
}
// Start the chores
- this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getClusterConnection());
+ this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getConnection());
rsServices.getChoreService().scheduleChore(spaceQuotaRefresher);
this.regionSizeReporter = new RegionSizeReportingChore(rsServices);
rsServices.getChoreService().scheduleChore(regionSizeReporter);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java
index b325f66..fae0e81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java
@@ -17,12 +17,11 @@
package org.apache.hadoop.hbase.quotas.policies;
import java.io.IOException;
-
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.quotas.SpaceLimitingException;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* A {@link SpaceViolationPolicyEnforcement} which disables the table. The enforcement counterpart
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index a6214ba..59dadc3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -84,7 +84,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZNodeClearer;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionUtils;
@@ -277,14 +276,17 @@ public class HRegionServer extends HasThread implements
protected HeapMemoryManager hMemManager;
/**
- * Cluster connection to be shared by services.
+ * Connection to be shared by services.
+ * <p/>
* Initialized at server startup and closed when server shuts down.
+ * <p/>
* Clients must never close it explicitly.
- * Clients hosted by this Server should make use of this clusterConnection rather than create
- * their own; if they create their own, there is no way for the hosting server to shutdown
- * ongoing client RPCs.
+ * <p/>
+ * Clients hosted by this Server should make use of this connection rather than create their own;
+ * if they create their own, there is no way for the hosting server to shutdown ongoing client
+ * RPCs.
*/
- protected ClusterConnection clusterConnection;
+ protected Connection connection;
/**
* The asynchronous cluster connection to be shared by services.
@@ -829,11 +831,11 @@ public class HRegionServer extends HasThread implements
* Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
* local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
*/
- private ClusterConnection createClusterConnection() throws IOException {
+ private Connection createConnection() throws IOException {
// Create a cluster connection that when appropriate, can short-circuit and go directly to the
// local server if the request is to the local server bypassing RPC. Can be used for both local
// and remote invocations.
- ClusterConnection conn =
+ Connection conn =
ConnectionUtils.createShortCircuitConnection(unsetClientZookeeperQuorum(), null,
userProvider.getCurrent(), serverName, rpcServices, rpcServices);
// This is used to initialize the batch thread pool inside the connection implementation.
@@ -870,8 +872,8 @@ public class HRegionServer extends HasThread implements
* Setup our cluster connection if not already initialized.
*/
protected final synchronized void setupClusterConnection() throws IOException {
- if (clusterConnection == null) {
- clusterConnection = createClusterConnection();
+ if (connection == null) {
+ connection = createConnection();
asyncClusterConnection =
ClusterConnectionFactory.createAsyncClusterConnection(unsetClientZookeeperQuorum(),
new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), userProvider.getCurrent());
@@ -1128,9 +1130,9 @@ public class HRegionServer extends HasThread implements
LOG.info("stopping server " + this.serverName);
}
- if (this.clusterConnection != null && !clusterConnection.isClosed()) {
+ if (this.connection != null && !connection.isClosed()) {
try {
- this.clusterConnection.close();
+ this.connection.close();
} catch (IOException e) {
// Although the {@link Closeable} interface throws an {@link
// IOException}, in reality, the implementation would never do that.
@@ -2201,12 +2203,7 @@ public class HRegionServer extends HasThread implements
@Override
public Connection getConnection() {
- return getClusterConnection();
- }
-
- @Override
- public ClusterConnection getClusterConnection() {
- return this.clusterConnection;
+ return this.connection;
}
@Override
@@ -2312,7 +2309,7 @@ public class HRegionServer extends HasThread implements
}
} else {
try {
- MetaTableAccessor.updateRegionLocation(clusterConnection,
+ MetaTableAccessor.updateRegionLocation(connection,
hris[0], serverName, openSeqNum, masterSystemTime);
} catch (IOException e) {
LOG.info("Failed to update meta", e);
@@ -2343,7 +2340,7 @@ public class HRegionServer extends HasThread implements
// Keep looping till we get an error. We want to send reports even though server is going down.
// Only go down if connection is null. It is set to null almost as last thing as the
// HRegionServer goes down.
- while (this.clusterConnection != null && !this.clusterConnection.isClosed()) {
+ while (this.connection != null && !this.connection.isClosed()) {
RegionServerStatusService.BlockingInterface rss = rssStub;
try {
if (rss == null) {
@@ -3816,7 +3813,7 @@ public class HRegionServer extends HasThread implements
@Override
public void unassign(byte[] regionName) throws IOException {
- clusterConnection.getAdmin().unassign(regionName, false);
+ connection.getAdmin().unassign(regionName, false);
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 432dbcd..a415477 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.replication.TableCFs;
@@ -208,7 +208,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
Configuration conf = getConf();
HBaseAdmin.available(conf);
- ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
+ Connection connection = ConnectionFactory.createConnection(conf);
Admin admin = connection.getAdmin();
ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(),
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index 7d1245c..bbd7675 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -153,7 +152,7 @@ public class ReplicationSyncUp extends Configured implements Tool {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -163,11 +162,6 @@ public class ReplicationSyncUp extends Configured implements Tool {
}
@Override
- public ClusterConnection getClusterConnection() {
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index d3ad73b..1dd5ce0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
@@ -161,9 +160,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
/**
* HBaseFsck (hbck) is a tool for checking and repairing region consistency and
* table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
@@ -245,7 +241,7 @@ public class HBaseFsck extends Configured implements Closeable {
**********************/
private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());
private ClusterMetrics status;
- private ClusterConnection connection;
+ private Connection connection;
private Admin admin;
private Table meta;
// threads to do parallelizable tasks: retrieve data from regionservers, handle overlapping regions
@@ -585,7 +581,7 @@ public class HBaseFsck extends Configured implements Closeable {
LOG.info("Launching hbck");
- connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());
+ connection = ConnectionFactory.createConnection(getConf());
admin = connection.getAdmin();
meta = connection.getTable(TableName.META_TABLE_NAME);
status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,
@@ -4332,10 +4328,10 @@ public class HBaseFsck extends Configured implements Closeable {
private final HBaseFsck hbck;
private final ServerName rsinfo;
private final ErrorReporter errors;
- private final ClusterConnection connection;
+ private final Connection connection;
WorkItemRegion(HBaseFsck hbck, ServerName info,
- ErrorReporter errors, ClusterConnection connection) {
+ ErrorReporter errors, Connection connection) {
this.hbck = hbck;
this.rsinfo = info;
this.errors = errors;
@@ -4346,32 +4342,29 @@ public class HBaseFsck extends Configured implements Closeable {
public synchronized Void call() throws IOException {
errors.progress();
try {
- BlockingInterface server = connection.getAdmin(rsinfo);
-
// list all online regions from this region server
- List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
+ List<RegionInfo> regions = connection.getAdmin().getRegions(rsinfo);
regions = filterRegions(regions);
if (details) {
- errors.detail("RegionServer: " + rsinfo.getServerName() +
- " number of regions: " + regions.size());
- for (RegionInfo rinfo: regions) {
- errors.detail(" " + rinfo.getRegionNameAsString() +
- " id: " + rinfo.getRegionId() +
- " encoded_name: " + rinfo.getEncodedName() +
- " start: " + Bytes.toStringBinary(rinfo.getStartKey()) +
- " end: " + Bytes.toStringBinary(rinfo.getEndKey()));
+ errors.detail(
+ "RegionServer: " + rsinfo.getServerName() + " number of regions: " + regions.size());
+ for (RegionInfo rinfo : regions) {
+ errors.detail(" " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() +
+ " encoded_name: " + rinfo.getEncodedName() + " start: " +
+ Bytes.toStringBinary(rinfo.getStartKey()) + " end: " +
+ Bytes.toStringBinary(rinfo.getEndKey()));
}
}
// check to see if the existence of this region matches the region in META
- for (RegionInfo r:regions) {
+ for (RegionInfo r : regions) {
HbckInfo hbi = hbck.getOrCreateInfo(r.getEncodedName());
hbi.addServer(r, rsinfo);
}
- } catch (IOException e) { // unable to connect to the region server.
- errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "RegionServer: " + rsinfo.getServerName() +
- " Unable to fetch region information. " + e);
+ } catch (IOException e) { // unable to connect to the region server.
+ errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE,
+ "RegionServer: " + rsinfo.getServerName() + " Unable to fetch region information. " + e);
throw e;
}
return null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 121d06c..d4a28c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index 5805793..d095fa3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -26,19 +26,17 @@ import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Provides ability to create multiple Connection instances and allows to process a batch of
@@ -112,14 +110,11 @@ public class MultiHConnection {
* @param callback to run when results are in
* @throws IOException If IO failure occurs
*/
- @SuppressWarnings("deprecation")
public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName,
Object[] results, Batch.Callback<R> callback) throws IOException {
// Currently used by RegionStateStore
- ClusterConnection conn =
- (ClusterConnection) connections[ThreadLocalRandom.current().nextInt(noOfConnections)];
-
- HTable.doBatchWithCallback(actions, results, callback, conn, batchPool, tableName);
+ HTable.doBatchWithCallback(actions, results, callback,
+ connections[ThreadLocalRandom.current().nextInt(noOfConnections)], batchPool, tableName);
}
// Copied from ConnectionImplementation.getBatchPool()
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index a779d36..540e7f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.NoServerForRegionException;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -550,7 +550,7 @@ public class RegionSplitter {
}
// make sure this region wasn't already split
- byte[] sk = regionLoc.getRegionInfo().getStartKey();
+ byte[] sk = regionLoc.getRegion().getStartKey();
if (sk.length != 0) {
if (Bytes.equals(split, sk)) {
LOG.debug("Region already split on "
@@ -712,7 +712,6 @@ public class RegionSplitter {
htd = table.getDescriptor();
}
try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
-
// for every region that hasn't been verified as a finished split
for (Pair<byte[], byte[]> region : regionList) {
byte[] start = region.getFirst();
@@ -720,7 +719,7 @@ public class RegionSplitter {
// see if the new split daughter region has come online
try {
- HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo();
+ RegionInfo dri = regionLocator.getRegionLocation(split, true).getRegion();
if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
logicalSplitting.add(region);
continue;
@@ -735,10 +734,10 @@ public class RegionSplitter {
try {
// when a daughter region is opened, a compaction is triggered
// wait until compaction completes for both daughter regions
- LinkedList<HRegionInfo> check = Lists.newLinkedList();
- check.add(regionLocator.getRegionLocation(start).getRegionInfo());
- check.add(regionLocator.getRegionLocation(split).getRegionInfo());
- for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) {
+ LinkedList<RegionInfo> check = Lists.newLinkedList();
+ check.add(regionLocator.getRegionLocation(start).getRegion());
+ check.add(regionLocator.getRegionLocation(split).getRegion());
+ for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) {
byte[] sk = hri.getStartKey();
if (sk.length == 0)
sk = splitAlgo.firstRow();
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index c9d58e5..0b6c060 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -261,7 +261,7 @@ if ( fqtn != null ) {
stateMap.put(regionInfo.getEncodedName(), regionState);
}
}
- RegionLocator r = master.getClusterConnection().getRegionLocator(table.getName());
+ RegionLocator r = master.getConnection().getRegionLocator(table.getName());
try { %>
<h2>Table Attributes</h2>
<table class="table table-striped">
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
index d760a7d..8020553 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase;
import java.io.Closeable;
import java.io.IOException;
-
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -28,10 +27,6 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-
/**
* This class defines methods that can help with managing HBase clusters
* from unit tests and system tests. There are 3 types of cluster deployments:
@@ -98,24 +93,6 @@ public abstract class HBaseCluster implements Closeable, Configurable {
}
/**
- * Returns an {@link MasterService.BlockingInterface} to the active master
- */
- public abstract MasterService.BlockingInterface getMasterAdminService()
- throws IOException;
-
- /**
- * Returns an AdminProtocol interface to the regionserver
- */
- public abstract AdminService.BlockingInterface getAdminProtocol(ServerName serverName)
- throws IOException;
-
- /**
- * Returns a ClientProtocol interface to the regionserver
- */
- public abstract ClientService.BlockingInterface getClientProtocol(ServerName serverName)
- throws IOException;
-
- /**
* Starts a new region server on the given hostname or if this is a mini/local cluster,
* starts a region server locally.
* @param hostname the hostname to start the regionserver on
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 473eb74..92cfddf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -24,7 +24,6 @@ import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.master.HMaster;
@@ -42,9 +41,6 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
/**
@@ -521,15 +517,6 @@ public class MiniHBaseCluster extends HBaseCluster {
* Returns the current active master, if available.
* @return the active HMaster, null if none is active.
*/
- @Override
- public MasterService.BlockingInterface getMasterAdminService() {
- return this.hbaseCluster.getActiveMaster().getMasterRpcServices();
- }
-
- /**
- * Returns the current active master, if available.
- * @return the active HMaster, null if none is active.
- */
public HMaster getMaster() {
return this.hbaseCluster.getActiveMaster();
}
@@ -921,15 +908,4 @@ public class MiniHBaseCluster extends HBaseCluster {
}
return -1;
}
-
- @Override
- public AdminService.BlockingInterface getAdminProtocol(ServerName serverName) throws IOException {
- return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices();
- }
-
- @Override
- public ClientService.BlockingInterface getClientProtocol(ServerName serverName)
- throws IOException {
- return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices();
- }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index ade9cde..77b2b91 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -32,7 +32,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.locking.EntityLock;
@@ -162,7 +161,7 @@ public class MockRegionServerServices implements RegionServerServices {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -266,7 +265,6 @@ public class MockRegionServerServices implements RegionServerServices {
@Override
public ServerNonceManager getNonceManager() {
- // TODO Auto-generated method stub
return null;
}
@@ -277,7 +275,6 @@ public class MockRegionServerServices implements RegionServerServices {
@Override
public boolean registerService(Service service) {
- // TODO Auto-generated method stub
return false;
}
@@ -292,11 +289,6 @@ public class MockRegionServerServices implements RegionServerServices {
}
@Override
- public ClusterConnection getClusterConnection() {
- return null;
- }
-
- @Override
public ThroughputController getFlushThroughputController() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 16f3930..618fe74 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
@@ -86,7 +86,7 @@ public class TestZooKeeperTableArchiveClient {
private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
private static ZKTableArchiveClient archivingClient;
private final List<Path> toCleanup = new ArrayList<>();
- private static ClusterConnection CONNECTION;
+ private static Connection CONNECTION;
private static RegionServerServices rss;
/**
@@ -96,7 +96,7 @@ public class TestZooKeeperTableArchiveClient {
public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniZKCluster();
- CONNECTION = (ClusterConnection)ConnectionFactory.createConnection(UTIL.getConfiguration());
+ CONNECTION = ConnectionFactory.createConnection(UTIL.getConfiguration());
archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION);
// make hfile archiving node so we can archive files
ZKWatcher watcher = UTIL.getZooKeeperWatcher();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 2a5a395..2c129a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -57,11 +57,11 @@ public class HConnectionTestingUtility {
* @throws ZooKeeperConnectionException
*/
public static ConnectionImplementation getMockedConnection(final Configuration conf)
- throws ZooKeeperConnectionException {
+ throws ZooKeeperConnectionException {
ConnectionImplementation connection = Mockito.mock(ConnectionImplementation.class);
Mockito.when(connection.getConfiguration()).thenReturn(conf);
- Mockito.when(connection.getRpcControllerFactory()).thenReturn(
- Mockito.mock(RpcControllerFactory.class));
+ Mockito.when(connection.getRpcControllerFactory())
+ .thenReturn(Mockito.mock(RpcControllerFactory.class));
// we need a real retrying caller
RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf);
Mockito.when(connection.getRpcRetryingCallerFactory()).thenReturn(callerFactory);
@@ -81,11 +81,10 @@ public class HConnectionTestingUtility {
* the mocked connection
* @return Mock up a connection that returns a {@link Configuration} when
* {@link ConnectionImplementation#getConfiguration()} is called, a 'location' when
- * {@link ConnectionImplementation#getRegionLocation(TableName,byte[], boolean)}
- * is called, and that returns the passed
- * {@link AdminProtos.AdminService.BlockingInterface} instance when
- * {@link ConnectionImplementation#getAdmin(ServerName)} is called, returns the passed
- * {@link ClientProtos.ClientService.BlockingInterface} instance when
+ * {@link ConnectionImplementation#getRegionLocation(TableName, byte[], boolean)} is
+ * called, and that returns the passed {@link AdminProtos.AdminService.BlockingInterface}
+ * instance when {@link ConnectionImplementation#getAdmin(ServerName)} is called, returns
+ * the passed {@link ClientProtos.ClientService.BlockingInterface} instance when
* {@link ConnectionImplementation#getClient(ServerName)} is called (Be sure to call
* {@link Connection#close()} when done with this mocked Connection.
*/
@@ -138,9 +137,7 @@ public class HConnectionTestingUtility {
* calling {@link Connection#close()} else it will stick around; this is probably not what you
* want.
* @param conf configuration
- * @return ConnectionImplementation object for <code>conf</code>
- * @throws ZooKeeperConnectionException [Dead link]: See also
- * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
+ * @return ConnectionImplementation object for <code>conf</code>
*/
public static ConnectionImplementation getSpiedConnection(final Configuration conf)
throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index ba11858..efdf187 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -634,8 +634,8 @@ public class TestAdmin1 {
assertFalse(ADMIN.tableExists(tableName));
}
- protected void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator, int
- expectedRegions) throws IOException {
+ private void verifyRoundRobinDistribution(ConnectionImplementation c,
+ RegionLocator regionLocator, int expectedRegions) throws IOException {
int numRS = c.getCurrentNrHRS();
List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
Map<ServerName, List<RegionInfo>> server2Regions = new HashMap<>();
@@ -654,13 +654,14 @@ public class TestAdmin1 {
// which contains less regions by intention.
numRS--;
}
- float average = (float) expectedRegions/numRS;
- int min = (int)Math.floor(average);
- int max = (int)Math.ceil(average);
+ float average = (float) expectedRegions / numRS;
+ int min = (int) Math.floor(average);
+ int max = (int) Math.ceil(average);
for (List<RegionInfo> regionList : server2Regions.values()) {
- assertTrue("numRS=" + numRS + ", min=" + min + ", max=" + max +
- ", size=" + regionList.size() + ", tablesOnMaster=" + tablesOnMaster,
- regionList.size() == min || regionList.size() == max);
+ assertTrue(
+ "numRS=" + numRS + ", min=" + min + ", max=" + max + ", size=" + regionList.size() +
+ ", tablesOnMaster=" + tablesOnMaster,
+ regionList.size() == min || regionList.size() == max);
}
}
@@ -740,7 +741,7 @@ public class TestAdmin1 {
List<HRegionLocation> regions;
Iterator<HRegionLocation> hris;
RegionInfo hri;
- ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
+ ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection();
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
regions = l.getAllRegionLocations();
@@ -1241,13 +1242,9 @@ public class TestAdmin1 {
byte[][] nameofRegionsToMerge = new byte[2][];
nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
- MergeTableRegionsRequest request = RequestConverter
- .buildMergeTableRegionsRequest(
- nameofRegionsToMerge,
- true,
- HConstants.NO_NONCE,
- HConstants.NO_NONCE);
- ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster()
+ MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
+ nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
+ ((ConnectionImplementation) TEST_UTIL.getAdmin().getConnection()).getMaster()
.mergeTableRegions(null, request);
} catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException m) {
Throwable t = m.getCause();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index f4aafc0..6852718 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -769,7 +769,7 @@ public class TestAdmin2 {
Assert.assertNotNull(store);
Assert.assertEquals(expectedStoreFilesSize, store.getSize());
- ClusterConnection conn = ((ClusterConnection) ADMIN.getConnection());
+ ConnectionImplementation conn = (ConnectionImplementation) ADMIN.getConnection();
HBaseRpcController controller = conn.getRpcControllerFactory().newController();
for (int i = 0; i < 10; i++) {
RegionInfo ri =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 1f62731..0f08f44 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -274,7 +274,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
private void verifyRoundRobinDistribution(List<HRegionLocation> regions, int expectedRegions)
throws IOException {
- int numRS = ((ClusterConnection) TEST_UTIL.getConnection()).getCurrentNrHRS();
+ int numRS = ((ConnectionImplementation) TEST_UTIL.getConnection()).getCurrentNrHRS();
Map<ServerName, List<RegionInfo>> server2Regions = new HashMap<>();
regions.stream().forEach((loc) -> {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
index cd27a30..fd0eb7b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java
@@ -94,9 +94,9 @@ public class TestCISleep extends AbstractTestCITimeout {
final TableName tableName = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(tableName, FAM_NAM);
ClientServiceCallable<Object> regionServerCallable =
- new ClientServiceCallable<Object>(TEST_UTIL.getConnection(), tableName, FAM_NAM,
- new RpcControllerFactory(TEST_UTIL.getConfiguration()).newController(),
- HConstants.PRIORITY_UNSET) {
+ new ClientServiceCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
+ tableName, FAM_NAM, new RpcControllerFactory(TEST_UTIL.getConfiguration()).newController(),
+ HConstants.PRIORITY_UNSET) {
@Override
protected Object rpcCall() throws Exception {
return null;
@@ -126,9 +126,9 @@ public class TestCISleep extends AbstractTestCITimeout {
assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f));
}
- try (
- MasterCallable<Object> masterCallable = new MasterCallable<Object>(TEST_UTIL.getConnection(),
- new RpcControllerFactory(TEST_UTIL.getConfiguration())) {
+ try (MasterCallable<Object> masterCallable =
+ new MasterCallable<Object>((ConnectionImplementation) TEST_UTIL.getConnection(),
+ new RpcControllerFactory(TEST_UTIL.getConfiguration())) {
@Override
protected Object rpcCall() throws Exception {
return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
index b1ad866..8a4c065 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java
@@ -831,7 +831,7 @@ public class TestConnectionImplementation {
* from ZK by the client.
*/
@Test
- public void testConnection() throws Exception{
+ public void testConnection() throws Exception {
// We create an empty config and add the ZK address.
Configuration c = new Configuration();
c.set(HConstants.ZOOKEEPER_QUORUM,
@@ -840,7 +840,8 @@ public class TestConnectionImplementation {
TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT));
// This should be enough to connect
- ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(c);
+ ConnectionImplementation conn =
+ (ConnectionImplementation) ConnectionFactory.createConnection(c);
assertTrue(conn.isMasterRunning());
conn.close();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index 9ec7b96..f32123d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -154,7 +154,7 @@ public class TestFromClientSide3 {
// connection needed for poll-wait
HRegionLocation loc = locator.getRegionLocation(row, true);
AdminProtos.AdminService.BlockingInterface server =
- ((ClusterConnection) admin.getConnection()).getAdmin(loc.getServerName());
+ ((ConnectionImplementation) admin.getConnection()).getAdmin(loc.getServerName());
byte[] regName = loc.getRegionInfo().getRegionName();
for (int i = 0; i < nFlushes; i++) {
@@ -276,7 +276,7 @@ public class TestFromClientSide3 {
try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) {
TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS);
try (Admin admin = TEST_UTIL.getAdmin()) {
- ClusterConnection connection = (ClusterConnection) TEST_UTIL.getConnection();
+ ConnectionImplementation connection = (ConnectionImplementation) TEST_UTIL.getConnection();
// Create 3 store files.
byte[] row = Bytes.toBytes(random.nextInt());
@@ -655,7 +655,7 @@ public class TestFromClientSide3 {
@Test
public void testConnectionDefaultUsesCodec() throws Exception {
- ClusterConnection con = (ClusterConnection) TEST_UTIL.getConnection();
+ ConnectionImplementation con = (ConnectionImplementation) TEST_UTIL.getConnection();
assertTrue(con.hasCellBlockSupport());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
index 53f5064..108ab7f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java
@@ -107,15 +107,13 @@ public class TestMetaTableAccessorNoCluster {
Result r = Result.create(kvs);
assertNull(MetaTableAccessor.getRegionInfo(r));
- byte [] f = HConstants.CATALOG_FAMILY;
+ byte[] f = HConstants.CATALOG_FAMILY;
// Make a key value that doesn't have the expected qualifier.
- kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
- HConstants.SERVER_QUALIFIER, f));
+ kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.SERVER_QUALIFIER, f));
r = Result.create(kvs);
assertNull(MetaTableAccessor.getRegionInfo(r));
// Make a key that does not have a regioninfo value.
- kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
- HConstants.REGIONINFO_QUALIFIER, f));
+ kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, f));
RegionInfo hri = MetaTableAccessor.getRegionInfo(Result.create(kvs));
assertTrue(hri == null);
// OK, give it what it expects
@@ -161,7 +159,7 @@ public class TestMetaTableAccessorNoCluster {
RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
kvs.add(new KeyValue(rowToVerify,
HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
- Bytes.toBytes(sn.getHostAndPort())));
+ Bytes.toBytes(sn.getAddress().toString())));
kvs.add(new KeyValue(rowToVerify,
HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
Bytes.toBytes(sn.getStartcode())));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 7d36e99..50c9bd8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -553,7 +553,7 @@ public class TestMultiParallel {
};
NonceGenerator oldCnm =
- ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection)connection, cnm);
+ ConnectionUtils.injectNonceGeneratorForTesting((ConnectionImplementation) connection, cnm);
// First test sequential requests.
try {
@@ -615,7 +615,8 @@ public class TestMultiParallel {
validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L));
table.close();
} finally {
- ConnectionImplementation.injectNonceGeneratorForTesting((ClusterConnection) connection, oldCnm);
+ ConnectionImplementation.injectNonceGeneratorForTesting((ConnectionImplementation) connection,
+ oldCnm);
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 6616b3b..c8a7ca1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -571,7 +571,7 @@ public class TestReplicasClient {
LOG.info("get works and is not stale done");
//reset
- ClusterConnection connection = (ClusterConnection) HTU.getConnection();
+ ConnectionImplementation connection = (ConnectionImplementation) HTU.getConnection();
Counter hedgedReadOps = connection.getConnectionMetrics().hedgedReadOps;
Counter hedgedReadWin = connection.getConnectionMetrics().hedgedReadWin;
hedgedReadOps.dec(hedgedReadOps.getCount());
@@ -638,7 +638,7 @@ public class TestReplicasClient {
Thread.sleep(1000 + REFRESH_PERIOD * 2);
- AsyncProcess ap = ((ClusterConnection) HTU.getConnection()).getAsyncProcess();
+ AsyncProcess ap = ((ConnectionImplementation) HTU.getConnection()).getAsyncProcess();
// Make primary slowdown
SlowMeCopro.getPrimaryCdl().set(new CountDownLatch(1));
@@ -654,16 +654,14 @@ public class TestReplicasClient {
gets.add(g);
Object[] results = new Object[2];
- int operationTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getOperationTimeout();
- int readTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getReadRpcTimeout();
- AsyncProcessTask task = AsyncProcessTask.newBuilder()
- .setPool(HTable.getDefaultExecutor(HTU.getConfiguration()))
- .setTableName(table.getName())
- .setRowAccess(gets)
- .setResults(results)
- .setOperationTimeout(operationTimeout)
- .setRpcTimeout(readTimeout)
- .build();
+ int operationTimeout = ((ConnectionImplementation) HTU.getConnection())
+ .getConnectionConfiguration().getOperationTimeout();
+ int readTimeout = ((ConnectionImplementation) HTU.getConnection())
+ .getConnectionConfiguration().getReadRpcTimeout();
+ AsyncProcessTask task =
+ AsyncProcessTask.newBuilder().setPool(HTable.getDefaultExecutor(HTU.getConfiguration()))
+ .setTableName(table.getName()).setRowAccess(gets).setResults(results)
+ .setOperationTimeout(operationTimeout).setRpcTimeout(readTimeout).build();
AsyncRequestFuture reqs = ap.submit(task);
reqs.waitUntilDone();
// verify we got the right results back
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
index b0ec37e..d7f0c87 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
@@ -206,7 +206,7 @@ public class TestSeparateClientZKCluster {
// create table
Connection conn = TEST_UTIL.getConnection();
Admin admin = conn.getAdmin();
- HTable table = (HTable) conn.getTable(tn);
+ Table table = conn.getTable(tn);
try {
ColumnFamilyDescriptorBuilder cfDescBuilder =
ColumnFamilyDescriptorBuilder.newBuilder(family);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
index beaa59b..f743388 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
@@ -75,7 +75,7 @@ public class TestShortCircuitConnection {
htd.addFamily(hcd);
UTIL.createTable(htd, null);
HRegionServer regionServer = UTIL.getRSForFirstRegionInTable(tableName);
- ClusterConnection connection = regionServer.getClusterConnection();
+ ConnectionImplementation connection = (ConnectionImplementation) regionServer.getConnection();
Table tableIf = connection.getTable(tableName);
assertTrue(tableIf instanceof HTable);
HTable table = (HTable) tableIf;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 6aa0d5a..7cfec57 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.MasterSwitchType;
@@ -163,7 +162,7 @@ public class MockNoopMasterServices implements MasterServices {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -355,11 +354,6 @@ public class MockNoopMasterServices implements MasterServices {
}
@Override
- public ClusterConnection getClusterConnection() {
- return null;
- }
-
- @Override
public LoadBalancer getLoadBalancer() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index d7a46eb..211efc0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -305,7 +304,7 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -621,11 +620,6 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
}
@Override
- public ClusterConnection getClusterConnection() {
- return null;
- }
-
- @Override
public ThroughputController getFlushThroughputController() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 77667a7..7f9605e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -313,7 +312,7 @@ public class TestActiveMasterManager {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -331,12 +330,6 @@ public class TestActiveMasterManager {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
index a0aae32..0deea15 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
@@ -18,16 +18,12 @@
package org.apache.hadoop.hbase.master;
import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
@@ -51,14 +47,7 @@ public class TestClockSkewDetection {
@Test
public void testClockSkewDetection() throws Exception {
final Configuration conf = HBaseConfiguration.create();
- ServerManager sm = new ServerManager(new MockNoopMasterServices(conf) {
- @Override
- public ClusterConnection getClusterConnection() {
- ClusterConnection conn = mock(ClusterConnection.class);
- when(conn.getRpcControllerFactory()).thenReturn(mock(RpcControllerFactory.class));
- return conn;
- }
- });
+ ServerManager sm = new ServerManager(new MockNoopMasterServices(conf));
LOG.debug("regionServerStartup 1");
InetAddress ia1 = InetAddress.getLocalHost();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index 74d0548..f1758ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -184,7 +184,7 @@ public class TestMasterNoCluster {
// Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
// the conf from the master; the conf will already have a Connection
// associated so the below mocking of a connection will fail.
- final ClusterConnection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate(
+ final Connection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate(
TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(),
HRegionInfo.FIRST_META_REGIONINFO);
HMaster master = new HMaster(conf) {
@@ -212,12 +212,7 @@ public class TestMasterNoCluster {
}
@Override
- public ClusterConnection getConnection() {
- return mockedConnection;
- }
-
- @Override
- public ClusterConnection getClusterConnection() {
+ public Connection getConnection() {
return mockedConnection;
}
};
@@ -281,7 +276,7 @@ public class TestMasterNoCluster {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
// Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
// the conf from the master; the conf will already have a Connection
// associated so the below mocking of a connection will fail.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 56467cc..ef64c94 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -87,7 +87,7 @@ public class MockMasterServices extends MockNoopMasterServices {
private MasterProcedureEnv procedureEnv;
private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
private ProcedureStore procedureStore;
- private final ClusterConnection connection;
+ private final Connection connection;
private final LoadBalancer balancer;
private final ServerManager serverManager;
@@ -284,7 +284,7 @@ public class MockMasterServices extends MockNoopMasterServices {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return this.connection;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index c5fad32..3d6466d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -228,7 +227,7 @@ public class TestHFileCleaner {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -261,12 +260,6 @@ public class TestHFileCleaner {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index fd11ff8..82c8684 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -164,7 +163,7 @@ public class TestHFileLinkCleaner {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -193,13 +192,6 @@ public class TestHFileLinkCleaner {
public ChoreService getChoreService() {
return null;
}
-
- @Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
@Override
public FileSystem getFileSystem() {
return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index a5402c5..d9bba53 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -363,7 +362,7 @@ public class TestLogsCleaner {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -394,11 +393,6 @@ public class TestLogsCleaner {
}
@Override
- public ClusterConnection getClusterConnection() {
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index 9791643..b16d377 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -252,7 +251,7 @@ public class TestReplicationHFileCleaner {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -285,12 +284,6 @@ public class TestReplicationHFileCleaner {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index b4d16c6..29e6dbb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -359,11 +359,11 @@ public class MasterProcedureTestingUtility {
// Procedure Helpers
// ==========================================================================
public static long generateNonceGroup(final HMaster master) {
- return master.getClusterConnection().getNonceGenerator().getNonceGroup();
+ return master.getAsyncClusterConnection().getNonceGenerator().getNonceGroup();
}
public static long generateNonce(final HMaster master) {
- return master.getClusterConnection().getNonceGenerator().newNonce();
+ return master.getAsyncClusterConnection().getNonceGenerator().newNonce();
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index fd02cf4..af8cfb8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClientServiceCallable;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RpcRetryingCaller;
@@ -66,7 +66,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener;
@@ -89,10 +88,6 @@ import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-
/**
* Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of
* the region server's bulkLoad functionality.
@@ -214,29 +209,17 @@ public class TestHRegionServerBulkLoad {
}
// bulk load HFiles
BulkLoadHFiles.create(UTIL.getConfiguration()).bulkLoad(tableName, family2Files);
+ final Connection conn = UTIL.getConnection();
// Periodically do compaction to reduce the number of open file handles.
if (numBulkLoads.get() % 5 == 0) {
RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf);
RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
// 5 * 50 = 250 open file handles!
- ClientServiceCallable<Void> callable =
- new ClientServiceCallable<Void>(UTIL.getConnection(), tableName, Bytes.toBytes("aaa"),
- new RpcControllerFactory(UTIL.getConfiguration()).newController(),
- HConstants.PRIORITY_UNSET) {
- @Override
- protected Void rpcCall() throws Exception {
- LOG.debug(
- "compacting " + getLocation() + " for row " + Bytes.toStringBinary(getRow()));
- AdminProtos.AdminService.BlockingInterface server =
- ((ClusterConnection) UTIL.getConnection()).getAdmin(getLocation().getServerName());
- CompactRegionRequest request = RequestConverter.buildCompactRegionRequest(
- getLocation().getRegionInfo().getRegionName(), true, null);
- server.compactRegion(null, request);
- numCompactions.incrementAndGet();
- return null;
- }
- };
- caller.callWithRetries(callable, Integer.MAX_VALUE);
+ try (RegionLocator locator = conn.getRegionLocator(tableName)) {
+ HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true);
+ conn.getAdmin().compactRegion(loc.getRegion().getRegionName());
+ numCompactions.incrementAndGet();
+ }
}
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 4a359e4..7c6598d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
@@ -829,7 +828,7 @@ public class TestHeapMemoryManager {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -844,12 +843,6 @@ public class TestHeapMemoryManager {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index 43da846..b52bf19 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -132,7 +131,7 @@ public class TestSplitLogWorker {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -142,12 +141,6 @@ public class TestSplitLogWorker {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index fbc3361..a7c8cf6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
@@ -470,7 +469,7 @@ public class TestWALLockup {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -506,11 +505,6 @@ public class TestWALLockup {
}
@Override
- public ClusterConnection getClusterConnection() {
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 62ab265..9d3283d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
@@ -209,7 +208,7 @@ public class TestReplicationTrackerZKImpl {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -245,12 +244,6 @@ public class TestReplicationTrackerZKImpl {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 427f319..3a1320c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -852,8 +851,9 @@ public abstract class TestReplicationSourceManager {
public CoordinatedStateManager getCoordinatedStateManager() {
return null;
}
+
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -888,12 +888,6 @@ public abstract class TestReplicationSourceManager {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 92c8e54..a2981fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
@@ -211,7 +210,7 @@ public class TestTokenAuthentication {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -355,12 +354,6 @@ public class TestTokenAuthentication {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index d25ccef..9e29763 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
@@ -74,9 +73,6 @@ import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-
/**
* This is the base class for HBaseFsck's ability to detect reasons for inconsistent tables.
*
@@ -98,7 +94,7 @@ public class BaseTestHBaseFsck {
protected static RegionStates regionStates;
protected static ExecutorService tableExecutorService;
protected static ScheduledThreadPoolExecutor hbfsckExecutorService;
- protected static ClusterConnection connection;
+ protected static Connection connection;
protected static Admin admin;
// for the instance, reset every test run
@@ -298,9 +294,6 @@ public class BaseTestHBaseFsck {
/**
* delete table in preparation for next test
- *
- * @param tablename
- * @throws IOException
*/
void cleanupTable(TableName tablename) throws Exception {
if (tbl != null) {
@@ -319,10 +312,8 @@ public class BaseTestHBaseFsck {
Collection<ServerName> regionServers = status.getLiveServerMetrics().keySet();
Map<ServerName, List<String>> mm = new HashMap<>();
for (ServerName hsi : regionServers) {
- AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi);
-
// list all online regions from this region server
- List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
+ List<RegionInfo> regions = admin.getRegions(hsi);
List<String> regionNames = new ArrayList<>(regions.size());
for (RegionInfo hri : regions) {
regionNames.add(hri.getRegionNameAsString());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 13212d2..380c1c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -104,7 +103,7 @@ public class MockServer implements Server {
}
@Override
- public ClusterConnection getConnection() {
+ public Connection getConnection() {
return null;
}
@@ -115,7 +114,6 @@ public class MockServer implements Server {
@Override
public boolean isAborted() {
- // TODO Auto-generated method stub
return this.aborted;
}
@@ -125,12 +123,6 @@ public class MockServer implements Server {
}
@Override
- public ClusterConnection getClusterConnection() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public FileSystem getFileSystem() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 0a66ec0..f245384 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@@ -56,7 +56,7 @@ public abstract class MultiThreadedAction {
protected final TableName tableName;
protected final Configuration conf;
- protected final ClusterConnection connection; // all reader / writer threads will share this connection
+ protected final Connection connection; // all reader / writer threads will share this connection
protected int numThreads = 1;
@@ -151,7 +151,7 @@ public abstract class MultiThreadedAction {
this.dataGenerator = dataGen;
this.tableName = tableName;
this.actionLetter = actionLetter;
- this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
+ this.connection = ConnectionFactory.createConnection(conf);
}
public void start(long startKey, long endKey, int numThreads) throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
index 3686150..09ae96f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.io.hfile.TestHFile;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -74,7 +73,7 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck {
TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
regionStates = assignmentManager.getRegionStates();
- connection = (ClusterConnection) TEST_UTIL.getConnection();
+ connection = TEST_UTIL.getConnection();
admin = connection.getAdmin();
admin.balancerSwitch(false, true);
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
index 36e513c..abaaba0 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
@@ -29,9 +29,7 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
-
import javax.net.ssl.SSLException;
-
import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index d947a86..2b3a80a 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -41,11 +41,11 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@@ -144,7 +144,7 @@ public class TestThriftConnection {
private static Connection createConnection(int port, boolean useHttp) throws IOException {
Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
- conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
+ conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
ThriftConnection.class.getName());
if (useHttp) {
conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,