You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ap...@apache.org on 2015/08/14 19:07:26 UTC
[6/6] hbase git commit: HBASE-6721 RegionServer Group based
Assignment (Francis Liu)
HBASE-6721 RegionServer Group based Assignment (Francis Liu)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66e16163
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66e16163
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66e16163
Branch: refs/heads/hbase-6721-0.98
Commit: 66e16163f9f8289704889a5062365f69dc19f735
Parents: 9e720ef
Author: Andrew Purtell <ap...@apache.org>
Authored: Fri Aug 14 10:04:45 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Fri Aug 14 10:04:45 2015 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/ServerName.java | 21 +-
.../apache/hadoop/hbase/client/HConnection.java | 7 +
.../hadoop/hbase/client/HConnectionManager.java | 69 +
.../hadoop/hbase/group/GroupAdminClient.java | 192 +
.../hadoop/hbase/protobuf/ProtobufUtil.java | 33 +
.../java/org/apache/hadoop/hbase/HostPort.java | 76 +
.../apache/hadoop/hbase/group/GroupAdmin.java | 108 +
.../apache/hadoop/hbase/group/GroupInfo.java | 182 +
.../hbase/group/IntegrationTestGroup.java | 89 +
.../hbase/protobuf/generated/HBaseProtos.java | 763 +-
.../hbase/protobuf/generated/MasterProtos.java | 12050 ++++++++++++++++-
.../hbase/protobuf/generated/RSGroupProtos.java | 1330 ++
hbase-protocol/src/main/protobuf/HBase.proto | 5 +
hbase-protocol/src/main/protobuf/Master.proto | 107 +
hbase-protocol/src/main/protobuf/RSGroup.proto | 32 +
.../BaseMasterAndRegionObserver.java | 53 +
.../hbase/coprocessor/BaseMasterObserver.java | 52 +
.../hbase/coprocessor/MasterObserver.java | 96 +
.../hadoop/hbase/group/GroupAdminServer.java | 493 +
.../hbase/group/GroupBasedLoadBalancer.java | 411 +
.../hadoop/hbase/group/GroupInfoManager.java | 129 +
.../hbase/group/GroupInfoManagerImpl.java | 667 +
.../apache/hadoop/hbase/group/GroupSerDe.java | 88 +
.../apache/hadoop/hbase/group/GroupTracker.java | 341 +
.../hadoop/hbase/group/GroupableBalancer.java | 12 +
.../org/apache/hadoop/hbase/group/MXBean.java | 71 +
.../apache/hadoop/hbase/group/MXBeanImpl.java | 95 +
.../hadoop/hbase/master/AssignmentManager.java | 38 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 237 +-
.../hadoop/hbase/master/LoadBalancer.java | 3 +
.../hbase/master/MasterCoprocessorHost.java | 112 +
.../hadoop/hbase/master/MasterServices.java | 11 +
.../master/handler/CreateTableHandler.java | 13 +
.../master/handler/DeleteTableHandler.java | 6 +
.../hbase/security/access/AccessController.java | 32 +
.../hbase/coprocessor/TestMasterObserver.java | 51 +
.../apache/hadoop/hbase/group/TestGroups.java | 389 +
.../hadoop/hbase/group/TestGroupsBase.java | 567 +
.../hbase/group/TestGroupsOfflineMode.java | 181 +
.../hbase/group/VerifyingGroupAdminClient.java | 155 +
.../master/TestAssignmentManagerOnCluster.java | 148 +-
.../hadoop/hbase/master/TestCatalogJanitor.java | 21 +-
.../balancer/TestGroupBasedLoadBalancer.java | 588 +
.../security/access/TestAccessController.java | 75 +
hbase-shell/src/main/ruby/hbase.rb | 1 +
hbase-shell/src/main/ruby/hbase/group_admin.rb | 121 +
hbase-shell/src/main/ruby/hbase/hbase.rb | 4 +
hbase-shell/src/main/ruby/shell.rb | 21 +
hbase-shell/src/main/ruby/shell/commands.rb | 4 +
.../src/main/ruby/shell/commands/add_group.rb | 39 +
.../main/ruby/shell/commands/balance_group.rb | 37 +
.../src/main/ruby/shell/commands/get_group.rb | 44 +
.../ruby/shell/commands/get_server_group.rb | 40 +
.../main/ruby/shell/commands/get_table_group.rb | 41 +
.../src/main/ruby/shell/commands/list_groups.rb | 50 +
.../ruby/shell/commands/move_group_servers.rb | 37 +
.../ruby/shell/commands/move_group_tables.rb | 37 +
.../main/ruby/shell/commands/remove_group.rb | 37 +
58 files changed, 20407 insertions(+), 305 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java
index 36a67fe..743e425 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java
@@ -88,9 +88,8 @@ public class ServerName implements Comparable<ServerName>, Serializable {
public static final String UNKNOWN_SERVERNAME = "#unknown#";
private final String servername;
- private final String hostnameOnly;
- private final int port;
private final long startcode;
+ private final HostPort hostPort;
/**
* Cached versioned bytes of this ServerName instance.
@@ -102,10 +101,9 @@ public class ServerName implements Comparable<ServerName>, Serializable {
private ServerName(final String hostname, final int port, final long startcode) {
// Drop the domain is there is one; no need of it in a local cluster. With it, we get long
// unwieldy names.
- this.hostnameOnly = hostname;
- this.port = port;
+ this.hostPort = new HostPort(hostname, port);
this.startcode = startcode;
- this.servername = getServerName(this.hostnameOnly, port, startcode);
+ this.servername = getServerName(hostname, port, startcode);
}
/**
@@ -189,7 +187,8 @@ public class ServerName implements Comparable<ServerName>, Serializable {
* in compares, etc.
*/
public String toShortString() {
- return Addressing.createHostAndPortStr(getHostNameMinusDomain(this.hostnameOnly), this.port);
+ return Addressing.createHostAndPortStr(
+ getHostNameMinusDomain(hostPort.getHostname()), hostPort.getPort());
}
/**
@@ -208,11 +207,11 @@ public class ServerName implements Comparable<ServerName>, Serializable {
}
public String getHostname() {
- return hostnameOnly;
+ return hostPort.getHostname();
}
public int getPort() {
- return port;
+ return hostPort.getPort();
}
public long getStartcode() {
@@ -256,7 +255,11 @@ public class ServerName implements Comparable<ServerName>, Serializable {
* {@link Addressing#createHostAndPortStr(String, int)}
*/
public String getHostAndPort() {
- return Addressing.createHostAndPortStr(this.hostnameOnly, this.port);
+ return Addressing.createHostAndPortStr(hostPort.getHostname(), hostPort.getPort());
+ }
+
+ public HostPort getHostPort() {
+ return hostPort;
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index e267c50..9087198 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.group.GroupAdmin;
+import org.apache.hadoop.hbase.group.GroupAdminClient;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
@@ -555,4 +557,9 @@ public interface HConnection extends Abortable, Closeable {
* @return the configured client backoff policy
*/
ClientBackoffPolicy getBackoffPolicy();
+
+ /**
+ * @return client for region server group apis
+ */
+ GroupAdmin getGroupAdmin() throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index a85bda6..6fc2e4a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -71,6 +71,8 @@ import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.exceptions.RegionMovedException;
import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
+import org.apache.hadoop.hbase.group.GroupAdmin;
+import org.apache.hadoop.hbase.group.GroupAdminClient;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -81,8 +83,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
@@ -111,6 +117,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusR
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
@@ -131,6 +143,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshot
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
@@ -146,8 +160,14 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableReques
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
@@ -2220,6 +2240,50 @@ public class HConnectionManager {
SecurityCapabilitiesRequest request) throws ServiceException {
return stub.getSecurityCapabilities(controller, request);
}
+
+ public GetGroupInfoResponse getGroupInfo(RpcController controller, GetGroupInfoRequest request) throws ServiceException {
+ return stub.getGroupInfo(controller, request);
+ }
+
+ @Override
+ public GetGroupInfoOfTableResponse getGroupInfoOfTable(RpcController controller, GetGroupInfoOfTableRequest request) throws ServiceException {
+ return stub.getGroupInfoOfTable(controller, request);
+ }
+
+ @Override
+ public GetGroupInfoOfServerResponse getGroupInfoOfServer(RpcController controller, GetGroupInfoOfServerRequest request) throws ServiceException {
+ return stub.getGroupInfoOfServer(controller, request);
+ }
+
+ @Override
+ public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request) throws ServiceException {
+ return stub.moveServers(controller, request);
+ }
+
+ @Override
+ public MoveTablesResponse moveTables(RpcController controller, MoveTablesRequest request) throws ServiceException {
+ return stub.moveTables(controller, request);
+ }
+
+ @Override
+ public AddGroupResponse addGroup(RpcController controller, AddGroupRequest request) throws ServiceException {
+ return stub.addGroup(controller, request);
+ }
+
+ @Override
+ public RemoveGroupResponse removeGroup(RpcController controller, RemoveGroupRequest request) throws ServiceException {
+ return stub.removeGroup(controller, request);
+ }
+
+ @Override
+ public BalanceGroupResponse balanceGroup(RpcController controller, BalanceGroupRequest request) throws ServiceException {
+ return stub.balanceGroup(controller, request);
+ }
+
+ @Override
+ public ListGroupInfosResponse listGroupInfos(RpcController controller, ListGroupInfosRequest request) throws ServiceException {
+ return stub.listGroupInfos(controller, request);
+ }
};
}
@@ -2498,6 +2562,11 @@ public class HConnectionManager {
return this.backoffPolicy;
}
+ @Override
+ public GroupAdmin getGroupAdmin() throws IOException {
+ return new GroupAdminClient(conf);
+ }
+
/*
* Return the number of cached region for a table. It will only be called
* from a unit test.
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java
new file mode 100644
index 0000000..691e9dc
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java
@@ -0,0 +1,192 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.collect.Sets;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HostPort;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Client used for managing region server group information.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class GroupAdminClient implements GroupAdmin {
+ private MasterProtos.MasterService.BlockingInterface proxy;
+ private static final Log LOG = LogFactory.getLog(GroupAdminClient.class);
+
+ public GroupAdminClient(Configuration conf) throws IOException {
+ proxy = new HBaseAdmin(conf).getConnection().getKeepAliveMasterService();
+ }
+
+ @Override
+ public GroupInfo getGroupInfo(String groupName) throws IOException {
+ try {
+ MasterProtos.GetGroupInfoResponse resp =
+ proxy.getGroupInfo(null,
+ MasterProtos.GetGroupInfoRequest.newBuilder().setGroupName(groupName).build());
+ if(resp.hasGroupInfo()) {
+ return ProtobufUtil.toGroupInfo(resp.getGroupInfo());
+ }
+ return null;
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException {
+ MasterProtos.GetGroupInfoOfTableRequest request =
+ MasterProtos.GetGroupInfoOfTableRequest.newBuilder()
+ .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
+
+ try {
+ return ProtobufUtil.toGroupInfo(proxy.getGroupInfoOfTable(null, request).getGroupInfo());
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void moveServers(Set<HostPort> servers, String targetGroup) throws IOException {
+ Set<HBaseProtos.HostPort> hostPorts = Sets.newHashSet();
+ for(HostPort el: servers) {
+ hostPorts.add(HBaseProtos.HostPort.newBuilder()
+ .setHostName(el.getHostname())
+ .setPort(el.getPort())
+ .build());
+ }
+ MasterProtos.MoveServersRequest request =
+ MasterProtos.MoveServersRequest.newBuilder()
+ .setTargetGroup(targetGroup)
+ .addAllServers(hostPorts).build();
+
+ try {
+ proxy.moveServers(null, request);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
+ MasterProtos.MoveTablesRequest.Builder builder =
+ MasterProtos.MoveTablesRequest.newBuilder()
+ .setTargetGroup(targetGroup);
+ for(TableName tableName: tables) {
+ builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
+ }
+ try {
+ proxy.moveTables(null, builder.build());
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void addGroup(String groupName) throws IOException {
+ MasterProtos.AddGroupRequest request =
+ MasterProtos.AddGroupRequest.newBuilder()
+ .setGroupName(groupName).build();
+ try {
+ proxy.addGroup(null, request);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void removeGroup(String name) throws IOException {
+ MasterProtos.RemoveGroupRequest request =
+ MasterProtos.RemoveGroupRequest.newBuilder()
+ .setGroupName(name).build();
+ try {
+ proxy.removeGroup(null, request);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public boolean balanceGroup(String name) throws IOException {
+ MasterProtos.BalanceGroupRequest request =
+ MasterProtos.BalanceGroupRequest.newBuilder()
+ .setGroupName(name).build();
+
+ try {
+ return proxy.balanceGroup(null, request).getBalanceRan();
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public List<GroupInfo> listGroups() throws IOException {
+ try {
+ List<RSGroupProtos.GroupInfo> resp =
+ proxy.listGroupInfos(null, MasterProtos.ListGroupInfosRequest.newBuilder().build())
+ .getGroupInfoList();
+ List<GroupInfo> result = new ArrayList<GroupInfo>(resp.size());
+ for(RSGroupProtos.GroupInfo entry: resp) {
+ result.add(ProtobufUtil.toGroupInfo(entry));
+ }
+ return result;
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public GroupInfo getGroupOfServer(HostPort hostPort) throws IOException {
+ MasterProtos.GetGroupInfoOfServerRequest request =
+ MasterProtos.GetGroupInfoOfServerRequest.newBuilder()
+ .setServer(HBaseProtos.HostPort.newBuilder()
+ .setHostName(hostPort.getHostname())
+ .setPort(hostPort.getPort())
+ .build())
+ .build();
+ try {
+ return ProtobufUtil.toGroupInfo(
+ proxy.getGroupInfoOfServer(null, request).getGroupInfo());
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 2b80e87..a851416 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -31,6 +31,7 @@ import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
import com.google.protobuf.TextFormat;
+import org.apache.hadoop.hbase.HostPort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
@@ -62,6 +63,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.LimitInputStream;
+import org.apache.hadoop.hbase.group.GroupInfo;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
@@ -114,6 +116,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableReques
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -2954,4 +2957,34 @@ public final class ProtobufUtil {
}
return scList;
}
+
+ public static GroupInfo toGroupInfo(RSGroupProtos.GroupInfo proto) {
+ GroupInfo groupInfo = new GroupInfo(proto.getName());
+ for(HBaseProtos.HostPort el: proto.getServersList()) {
+ groupInfo.addServer(new HostPort(el.getHostName(), el.getPort()));
+ }
+ for(HBaseProtos.TableName pTableName: proto.getTablesList()) {
+ groupInfo.addTable(ProtobufUtil.toTableName(pTableName));
+ }
+ return groupInfo;
+ }
+
+ public static RSGroupProtos.GroupInfo toProtoGroupInfo(GroupInfo pojo) {
+ List<HBaseProtos.TableName> tables =
+ new ArrayList<HBaseProtos.TableName>(pojo.getTables().size());
+ for(TableName arg: pojo.getTables()) {
+ tables.add(ProtobufUtil.toProtoTableName(arg));
+ }
+ List<HBaseProtos.HostPort> hostports =
+ new ArrayList<HBaseProtos.HostPort>(pojo.getServers().size());
+ for(HostPort el: pojo.getServers()) {
+ hostports.add(HBaseProtos.HostPort.newBuilder()
+ .setHostName(el.getHostname())
+ .setPort(el.getPort())
+ .build());
+ }
+ return RSGroupProtos.GroupInfo.newBuilder().setName(pojo.getName())
+ .addAllServers(hostports)
+ .addAllTables(tables).build();
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-common/src/main/java/org/apache/hadoop/hbase/HostPort.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HostPort.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HostPort.java
new file mode 100644
index 0000000..c047ee0
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HostPort.java
@@ -0,0 +1,76 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Addressing;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HostPort implements Comparable<HostPort> {
+ private final String hostnameOnly;
+ private final int port;
+
+ public HostPort(final String hostname, final int port) {
+ this.hostnameOnly = hostname;
+ this.port = port;
+ }
+
+ public String getHostname() {
+ return hostnameOnly;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public static HostPort valueOf(final String hostport) {
+ String splits[] = hostport.split(":",2);
+ if(splits.length < 2)
+ throw new IllegalArgumentException("Server list contains not a valid <HOST>:<PORT> entry");
+ return new HostPort(splits[0], Integer.parseInt(splits[1]));
+ }
+
+ @Override
+ public String toString() {
+ return Addressing.createHostAndPortStr(this.hostnameOnly, this.port);
+ }
+
+ @Override
+ public int compareTo(HostPort other) {
+ int compare = this.getHostname().compareToIgnoreCase(other.getHostname());
+ if (compare != 0) return compare;
+ compare = this.getPort() - other.getPort();
+ return compare;
+ }
+
+ @Override
+ public int hashCode() {
+ return toString().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null) return false;
+ if (!(o instanceof HostPort)) return false;
+ return this.compareTo((HostPort)o) == 0;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java
new file mode 100644
index 0000000..822c1ef
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java
@@ -0,0 +1,108 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.group;
+
+import org.apache.hadoop.hbase.HostPort;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Group user API interface used between client and server.
+ */
+@InterfaceAudience.Private
+public interface GroupAdmin extends Closeable {
+ /**
+ * Gets the group information.
+ *
+ * @param groupName the group name
+ * @return An instance of GroupInfo
+ */
+ GroupInfo getGroupInfo(String groupName) throws IOException;
+
+ /**
+ * Gets the group info of table.
+ *
+ * @param tableName the table name
+ * @return An instance of GroupInfo.
+ */
+ GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException;
+
+ /**
+ * Move a set of servers to another group
+ *
+ *
+ * @param servers set of servers, must be in the form HOST:PORT
+ * @param targetGroup the target group
+ * @throws java.io.IOException Signals that an I/O exception has occurred.
+ */
+ void moveServers(Set<HostPort> servers, String targetGroup) throws IOException;
+
+ /**
+ * Move tables to a new group.
+ * This will unassign all of a table's regions so they can be reassigned to the correct group.
+ * @param tables list of tables to move
+ * @param targetGroup target group
+ * @throws java.io.IOException
+ */
+ void moveTables(Set<TableName> tables, String targetGroup) throws IOException;
+
+ /**
+ * Add a new group
+ * @param name name of the group
+ * @throws java.io.IOException
+ */
+ void addGroup(String name) throws IOException;
+
+ /**
+ * Remove a group
+ * @param name name of the group
+ * @throws java.io.IOException
+ */
+ void removeGroup(String name) throws IOException;
+
+ /**
+ * Balance the regions in a group
+ *
+ * @param name the name of the group to balance
+ * @return
+ * @throws java.io.IOException
+ */
+ boolean balanceGroup(String name) throws IOException;
+
+ /**
+ * Lists the existing groups.
+ *
+ * @return Collection of GroupInfo.
+ */
+ List<GroupInfo> listGroups() throws IOException;
+
+ /**
+ * Retrieve the GroupInfo a server is affiliated to
+ * @param hostPort
+ * @return
+ * @throws java.io.IOException
+ */
+ GroupInfo getGroupOfServer(HostPort hostPort) throws IOException;
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java
new file mode 100644
index 0000000..41a6e2e
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java
@@ -0,0 +1,182 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.hbase.HostPort;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.NavigableSet;
+
+/**
+ * Stores the group information of region server groups.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class GroupInfo implements Serializable {
+
+ public static final String DEFAULT_GROUP = "default";
+ public static final String NAMESPACEDESC_PROP_GROUP = "hbase.rsgroup.name";
+ public static final String TABLEDESC_PROP_GROUP = "hbase.rsgroup.name";
+ public static final String TRANSITION_GROUP_PREFIX = "_transition_";
+
+ private String name;
+ private NavigableSet<HostPort> servers;
+ private NavigableSet<TableName> tables;
+
+ public GroupInfo(String name) {
+ this(name, Sets.<HostPort>newTreeSet(), Sets.<TableName>newTreeSet());
+ }
+
+ //constructor for jackson
+ @JsonCreator
+ GroupInfo(@JsonProperty("name") String name,
+ @JsonProperty("servers") NavigableSet<HostPort> servers,
+ @JsonProperty("tables") NavigableSet<TableName> tables) {
+ this.name = name;
+ this.servers = servers;
+ this.tables = tables;
+ }
+
+ public GroupInfo(GroupInfo src) {
+ name = src.getName();
+ servers = Sets.newTreeSet(src.getServers());
+ tables = Sets.newTreeSet(src.getTables());
+ }
+
+ /**
+ * Get group name.
+ *
+ * @return
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * Adds the server to the group.
+ *
+ * @param hostPort the server
+ */
+ public void addServer(HostPort hostPort){
+ servers.add(hostPort);
+ }
+
+ /**
+ * Adds a group of servers.
+ *
+ * @param hostPort the servers
+ */
+ public void addAllServers(Collection<HostPort> hostPort){
+ servers.addAll(hostPort);
+ }
+
+ /**
+ * @param hostPort
+ * @return true, if a server with hostPort is found
+ */
+ public boolean containsServer(HostPort hostPort) {
+ return servers.contains(hostPort);
+ }
+
+ /**
+ * Get list of servers.
+ *
+ * @return
+ */
+ public NavigableSet<HostPort> getServers() {
+ return servers;
+ }
+
+ /**
+ * Remove a server from this group.
+ *
+ * @param hostPort
+ */
+ public boolean removeServer(HostPort hostPort) {
+ return servers.remove(hostPort);
+ }
+
+ /**
+ * Set of tables that are members of this group
+ * @return
+ */
+ public NavigableSet<TableName> getTables() {
+ return tables;
+ }
+
+ public void addTable(TableName table) {
+ tables.add(table);
+ }
+
+ public void addAllTables(Collection<TableName> arg) {
+ tables.addAll(arg);
+ }
+
+ public boolean containsTable(TableName table) {
+ return tables.contains(table);
+ }
+
+ public boolean removeTable(TableName table) {
+ return tables.remove(table);
+ }
+
+ @Override
+ public String toString() {
+ StringBuffer sb = new StringBuffer();
+ sb.append("GroupName:");
+ sb.append(this.name);
+ sb.append(", ");
+ sb.append(" Servers:");
+ sb.append(this.servers);
+ return sb.toString();
+
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ GroupInfo groupInfo = (GroupInfo) o;
+
+ if (!name.equals(groupInfo.name)) return false;
+ if (!servers.equals(groupInfo.servers)) return false;
+ if (!tables.equals(groupInfo.tables)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = servers.hashCode();
+ result = 31 * result + tables.hashCode();
+ result = 31 * result + name.hashCode();
+ return result;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
new file mode 100644
index 0000000..62f4f8a
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
@@ -0,0 +1,89 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.collect.Sets;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Runs all of the unit tests defined in TestGroupsBase
+ * as an integration test.
+ * Requires TestGroupsBase.NUM_SLAVES_BASE servers to run.
+ */
+@Category(IntegrationTests.class)
+public class IntegrationTestGroup extends TestGroupsBase {
+ //Integration specific
+ private final static Log LOG = LogFactory.getLog(IntegrationTestGroup.class);
+ private static boolean initialized = false;
+
+ @Before
+ public void beforeMethod() throws Exception {
+ if(!initialized) {
+ LOG.info("Setting up IntegrationTestGroup");
+ LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers");
+ TEST_UTIL = new IntegrationTestingUtility();
+ ((IntegrationTestingUtility)TEST_UTIL).initializeCluster(NUM_SLAVES_BASE);
+ //set shared configs
+ admin = TEST_UTIL.getHBaseAdmin();
+ cluster = TEST_UTIL.getHBaseClusterInterface();
+ groupAdmin = new VerifyingGroupAdminClient(admin.getConnection().getGroupAdmin(),
+ TEST_UTIL.getConfiguration());
+ LOG.info("Done initializing cluster");
+ initialized = true;
+ //cluster may not be clean
+ //cleanup when initializing
+ afterMethod();
+ }
+ }
+
+ @After
+ public void afterMethod() throws Exception {
+ LOG.info("Cleaning up previous test run");
+ //cleanup previous artifacts
+ deleteTableIfNecessary();
+ deleteNamespaceIfNecessary();
+ deleteGroups();
+ admin.setBalancerRunning(false,true);
+
+ LOG.info("Restoring the cluster");
+ ((IntegrationTestingUtility)TEST_UTIL).restoreCluster();
+ LOG.info("Done restoring the cluster");
+
+ TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ LOG.info("Waiting for cleanup to finish "+groupAdmin.listGroups());
+ //Might be greater since moving servers back to default
+ //is after starting a server
+ return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size()
+ == NUM_SLAVES_BASE;
+ }
+ });
+ LOG.info("Done cleaning up previous test run");
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/66e16163/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 9c0447e..81dbd77 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -139,6 +139,621 @@ public final class HBaseProtos {
// @@protoc_insertion_point(enum_scope:CompareType)
}
+ public interface HostPortOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string host_name = 1;
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ boolean hasHostName();
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ java.lang.String getHostName();
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getHostNameBytes();
+
+ // required uint32 port = 2;
+ /**
+ * <code>required uint32 port = 2;</code>
+ */
+ boolean hasPort();
+ /**
+ * <code>required uint32 port = 2;</code>
+ */
+ int getPort();
+ }
+ /**
+ * Protobuf type {@code HostPort}
+ */
+ public static final class HostPort extends
+ com.google.protobuf.GeneratedMessage
+ implements HostPortOrBuilder {
+ // Use HostPort.newBuilder() to construct.
+ private HostPort(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private HostPort(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final HostPort defaultInstance;
+ public static HostPort getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public HostPort getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private HostPort(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ hostName_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ port_ = input.readUInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<HostPort> PARSER =
+ new com.google.protobuf.AbstractParser<HostPort>() {
+ public HostPort parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new HostPort(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<HostPort> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string host_name = 1;
+ public static final int HOST_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object hostName_;
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public boolean hasHostName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public java.lang.String getHostName() {
+ java.lang.Object ref = hostName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ hostName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getHostNameBytes() {
+ java.lang.Object ref = hostName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hostName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required uint32 port = 2;
+ public static final int PORT_FIELD_NUMBER = 2;
+ private int port_;
+ /**
+ * <code>required uint32 port = 2;</code>
+ */
+ public boolean hasPort() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required uint32 port = 2;</code>
+ */
+ public int getPort() {
+ return port_;
+ }
+
+ private void initFields() {
+ hostName_ = "";
+ port_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasHostName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasPort()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getHostNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt32(2, port_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getHostNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(2, port_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) obj;
+
+ boolean result = true;
+ result = result && (hasHostName() == other.hasHostName());
+ if (hasHostName()) {
+ result = result && getHostName()
+ .equals(other.getHostName());
+ }
+ result = result && (hasPort() == other.hasPort());
+ if (hasPort()) {
+ result = result && (getPort()
+ == other.getPort());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasHostName()) {
+ hash = (37 * hash) + HOST_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getHostName().hashCode();
+ }
+ if (hasPort()) {
+ hash = (37 * hash) + PORT_FIELD_NUMBER;
+ hash = (53 * hash) + getPort();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code HostPort}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ hostName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ port_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort build() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.hostName_ = hostName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.port_ = port_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()) return this;
+ if (other.hasHostName()) {
+ bitField0_ |= 0x00000001;
+ hostName_ = other.hostName_;
+ onChanged();
+ }
+ if (other.hasPort()) {
+ setPort(other.getPort());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasHostName()) {
+
+ return false;
+ }
+ if (!hasPort()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string host_name = 1;
+ private java.lang.Object hostName_ = "";
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public boolean hasHostName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public java.lang.String getHostName() {
+ java.lang.Object ref = hostName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ hostName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getHostNameBytes() {
+ java.lang.Object ref = hostName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hostName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public Builder setHostName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ hostName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public Builder clearHostName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ hostName_ = getDefaultInstance().getHostName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string host_name = 1;</code>
+ */
+ public Builder setHostNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ hostName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required uint32 port = 2;
+ private int port_ ;
+ /**
+ * <code>required uint32 port = 2;</code>
+ */
+ public boolean hasPort() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required uint32 port = 2;</code>
+ */
+ public int getPort() {
+ return port_;
+ }
+ /**
+ * <code>required uint32 port = 2;</code>
+ */
+ public Builder setPort(int value) {
+ bitField0_ |= 0x00000002;
+ port_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required uint32 port = 2;</code>
+ */
+ public Builder clearPort() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ port_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:HostPort)
+ }
+
+ static {
+ defaultInstance = new HostPort(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:HostPort)
+ }
+
public interface TableNameOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -16107,6 +16722,11 @@ public final class HBaseProtos {
}
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_HostPort_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_HostPort_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_TableName_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -16225,186 +16845,193 @@ public final class HBaseProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\013HBase.proto\032\nCell.proto\"1\n\tTableName\022\021" +
- "\n\tnamespace\030\001 \002(\014\022\021\n\tqualifier\030\002 \002(\014\"\250\001\n" +
- "\013TableSchema\022\036\n\ntable_name\030\001 \001(\0132\n.Table" +
- "Name\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPai" +
- "r\022,\n\017column_families\030\003 \003(\0132\023.ColumnFamil" +
- "ySchema\022&\n\rconfiguration\030\004 \003(\0132\017.NameStr" +
- "ingPair\"o\n\022ColumnFamilySchema\022\014\n\004name\030\001 " +
- "\002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPair" +
- "\022&\n\rconfiguration\030\003 \003(\0132\017.NameStringPair" +
- "\"\203\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta",
- "ble_name\030\002 \002(\0132\n.TableName\022\021\n\tstart_key\030" +
- "\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022" +
- "\r\n\005split\030\006 \001(\010\"1\n\014FavoredNodes\022!\n\014favore" +
- "d_node\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpec" +
- "ifier\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.Re" +
- "gionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023Regi" +
- "onSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCO" +
- "DED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001" +
- " \001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_n" +
- "ame\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 ",
- "\001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016Name" +
- "StringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"" +
- ",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030" +
- "\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016" +
- "\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030" +
- "\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\275\001\n\023SnapshotDescrip" +
- "tion\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcre" +
- "ation_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snap" +
- "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" +
- " \001(\005\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n",
- "\tSKIPFLUSH\020\002\"}\n\024ProcedureDescription\022\021\n\t" +
- "signature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcre" +
- "ation_time\030\003 \001(\003:\0010\022&\n\rconfiguration\030\004 \003" +
- "(\0132\017.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007LongM" +
- "sg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022\n\ndou" +
- "ble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016bigdec" +
- "imal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits" +
- "\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"K\n\023Namespa" +
- "ceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfigurat" +
- "ion\030\002 \003(\0132\017.NameStringPair\"$\n\020RegionServ",
- "erInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013CompareType\022" +
- "\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022" +
- "\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007" +
- "GREATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.apache.hadoo" +
- "p.hbase.protobuf.generatedB\013HBaseProtosH" +
- "\001\240\001\001"
+ "\n\013HBase.proto\032\nCell.proto\"+\n\010HostPort\022\021\n" +
+ "\thost_name\030\001 \002(\t\022\014\n\004port\030\002 \002(\r\"1\n\tTableN" +
+ "ame\022\021\n\tnamespace\030\001 \002(\014\022\021\n\tqualifier\030\002 \002(" +
+ "\014\"\250\001\n\013TableSchema\022\036\n\ntable_name\030\001 \001(\0132\n." +
+ "TableName\022#\n\nattributes\030\002 \003(\0132\017.BytesByt" +
+ "esPair\022,\n\017column_families\030\003 \003(\0132\023.Column" +
+ "FamilySchema\022&\n\rconfiguration\030\004 \003(\0132\017.Na" +
+ "meStringPair\"o\n\022ColumnFamilySchema\022\014\n\004na" +
+ "me\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesByte" +
+ "sPair\022&\n\rconfiguration\030\003 \003(\0132\017.NameStrin",
+ "gPair\"\203\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022" +
+ "\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\021\n\tstart" +
+ "_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005" +
+ " \001(\010\022\r\n\005split\030\006 \001(\010\"1\n\014FavoredNodes\022!\n\014f" +
+ "avored_node\030\001 \003(\0132\013.ServerName\"\225\001\n\017Regio" +
+ "nSpecifier\0222\n\004type\030\001 \002(\0162$.RegionSpecifi" +
+ "er.RegionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n" +
+ "\023RegionSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n" +
+ "\023ENCODED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004f" +
+ "rom\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\th",
+ "ost_name\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_co" +
+ "de\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n" +
+ "\016NameStringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002" +
+ " \002(\t\",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005v" +
+ "alue\030\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 " +
+ "\002(\014\022\016\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004" +
+ "name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\275\001\n\023SnapshotDe" +
+ "scription\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030" +
+ "\n\rcreation_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031" +
+ ".SnapshotDescription.Type:\005FLUSH\022\017\n\007vers",
+ "ion\030\005 \001(\005\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" +
+ "\020\001\022\r\n\tSKIPFLUSH\020\002\"}\n\024ProcedureDescriptio" +
+ "n\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030" +
+ "\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rconfiguratio" +
+ "n\030\004 \003(\0132\017.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007" +
+ "LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022" +
+ "\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016b" +
+ "igdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig" +
+ "_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"K\n\023Na" +
+ "mespaceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfi",
+ "guration\030\002 \003(\0132\017.NameStringPair\"$\n\020Regio" +
+ "nServerInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013Compare" +
+ "Type\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQU" +
+ "AL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020" +
+ "\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.apache." +
+ "hadoop.hbase.protobuf.generatedB\013HBasePr" +
+ "otosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
- internal_static_TableName_descriptor =
+ internal_static_HostPort_descriptor =
getDescriptor().getMessageTypes().get(0);
+ internal_static_HostPort_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_HostPort_descriptor,
+ new java.lang.String[] { "HostName", "Port", });
+ internal_static_TableName_descriptor =
+ getDescriptor().getMessageTypes().get(1);
internal_static_TableName_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TableName_descriptor,
new java.lang.String[] { "Namespace", "Qualifier", });
internal_static_TableSchema_descriptor =
- getDescriptor().getMessageTypes().get(1);
+ getDescriptor().getMessageTypes().get(2);
internal_static_TableSchema_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TableSchema_descriptor,
new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", });
internal_static_ColumnFamilySchema_descriptor =
- getDescriptor().getMessageTypes().get(2);
+ getDescriptor().getMessageTypes().get(3);
internal_static_ColumnFamilySchema_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ColumnFamilySchema_descriptor,
new java.lang.String[] { "Name", "Attributes", "Configuration", });
internal_static_RegionInfo_descriptor =
- getDescriptor().getMessageTypes().get(3);
+ getDescriptor().getMessageTypes().get(4);
internal_static_RegionInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionInfo_descriptor,
new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", });
internal_static_FavoredNodes_descriptor =
- getDescriptor().getMessageTypes().get(4);
+ getDescriptor().getMessageTypes().get(5);
internal_static_FavoredNodes_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_FavoredNodes_descriptor,
new java.lang.String[] { "FavoredNode", });
internal_static_RegionSpecifier_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(6);
internal_static_RegionSpecifier_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionSpecifier_descriptor,
new java.lang.String[] { "Type", "Value", });
internal_static_TimeRange_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(7);
internal_static_TimeRange_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TimeRange_descriptor,
new java.lang.String[] { "From", "To", });
internal_static_ServerName_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_ServerName_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ServerName_descriptor,
new java.lang.String[] { "HostName", "Port", "StartCode", });
internal_static_Coprocessor_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_Coprocessor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_Coprocessor_descriptor,
new java.lang.String[] { "Name", });
internal_static_NameStringPair_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_NameStringPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NameStringPair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_NameBytesPair_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_NameBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NameBytesPair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_BytesBytesPair_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_BytesBytesPair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BytesBytesPair_descriptor,
new java.lang.String[] { "First", "Second", });
internal_static_NameInt64Pair_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_NameInt64Pair_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NameInt64Pair_descriptor,
new java.lang.String[] { "Name", "Value", });
internal_static_SnapshotDescription_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_SnapshotDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SnapshotDescription_descriptor,
new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", });
internal_static_ProcedureDescription_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_ProcedureDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ProcedureDescription_descriptor,
new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", });
internal_static_EmptyMsg_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_EmptyMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_EmptyMsg_descriptor,
new java.lang.String[] { });
internal_static_LongMsg_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(17);
internal_static_LongMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_LongMsg_descriptor,
new java.lang.String[] { "LongMsg", });
internal_static_DoubleMsg_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(18);
internal_static_DoubleMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DoubleMsg_descriptor,
new java.lang.String[] { "DoubleMsg", });
internal_static_BigDecimalMsg_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(19);
internal_static_BigDecimalMsg_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BigDecimalMsg_descriptor,
new java.lang.String[] { "BigdecimalMsg", });
internal_static_UUID_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(20);
internal_static_UUID_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_UUID_descriptor,
new java.lang.String[] { "LeastSigBits", "MostSigBits", });
internal_static_NamespaceDescriptor_descriptor =
- getDescriptor().getMessageTypes().get(20);
+ getDescriptor().getMessageTypes().get(21);
internal_static_NamespaceDescriptor_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_NamespaceDescriptor_descriptor,
new java.lang.String[] { "Name", "Configuration", });
internal_static_RegionServerInfo_descriptor =
- getDescriptor().getMessageTypes().get(21);
+ getDescriptor().getMessageTypes().get(22);
internal_static_RegionServerInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionServerInfo_descriptor,