Posted to commits@hbase.apache.org by ap...@apache.org on 2015/08/01 18:36:16 UTC

[4/6] hbase git commit: HBASE-6721 RegionServer group based assignment (Francis Liu)

http://git-wip-us.apache.org/repos/asf/hbase/blob/16f65bad/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
new file mode 100644
index 0000000..a39e48a
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
@@ -0,0 +1,1331 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RSGroup.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RSGroupProtos {
+  private RSGroupProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface GroupInfoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string name = 1;
+    /**
+     * <code>required string name = 1;</code>
+     */
+    boolean hasName();
+    /**
+     * <code>required string name = 1;</code>
+     */
+    java.lang.String getName();
+    /**
+     * <code>required string name = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getNameBytes();
+
+    // repeated .hbase.pb.HostPort servers = 4;
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort> 
+        getServersList();
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index);
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    int getServersCount();
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> 
+        getServersOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder(
+        int index);
+
+    // repeated .hbase.pb.TableName tables = 3;
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> 
+        getTablesList();
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index);
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    int getTablesCount();
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+        getTablesOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GroupInfo}
+   */
+  public static final class GroupInfo extends
+      com.google.protobuf.GeneratedMessage
+      implements GroupInfoOrBuilder {
+    // Use GroupInfo.newBuilder() to construct.
+    private GroupInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GroupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GroupInfo defaultInstance;
+    public static GroupInfo getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GroupInfo getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GroupInfo(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              name_ = input.readBytes();
+              break;
+            }
+            case 26: {
+              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+                tables_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>();
+                mutable_bitField0_ |= 0x00000004;
+              }
+              tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry));
+              break;
+            }
+            case 34: {
+              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+                servers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort>();
+                mutable_bitField0_ |= 0x00000002;
+              }
+              servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+          tables_ = java.util.Collections.unmodifiableList(tables_);
+        }
+        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+          servers_ = java.util.Collections.unmodifiableList(servers_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_GroupInfo_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_GroupInfo_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GroupInfo> PARSER =
+        new com.google.protobuf.AbstractParser<GroupInfo>() {
+      public GroupInfo parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GroupInfo(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GroupInfo> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string name = 1;
+    public static final int NAME_FIELD_NUMBER = 1;
+    private java.lang.Object name_;
+    /**
+     * <code>required string name = 1;</code>
+     */
+    public boolean hasName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string name = 1;</code>
+     */
+    public java.lang.String getName() {
+      java.lang.Object ref = name_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          name_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string name = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getNameBytes() {
+      java.lang.Object ref = name_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        name_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // repeated .hbase.pb.HostPort servers = 4;
+    public static final int SERVERS_FIELD_NUMBER = 4;
+    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort> servers_;
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort> getServersList() {
+      return servers_;
+    }
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> 
+        getServersOrBuilderList() {
+      return servers_;
+    }
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    public int getServersCount() {
+      return servers_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) {
+      return servers_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder(
+        int index) {
+      return servers_.get(index);
+    }
+
+    // repeated .hbase.pb.TableName tables = 3;
+    public static final int TABLES_FIELD_NUMBER = 3;
+    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tables_;
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTablesList() {
+      return tables_;
+    }
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+        getTablesOrBuilderList() {
+      return tables_;
+    }
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    public int getTablesCount() {
+      return tables_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) {
+      return tables_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.TableName tables = 3;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder(
+        int index) {
+      return tables_.get(index);
+    }
+
+    private void initFields() {
+      name_ = "";
+      servers_ = java.util.Collections.emptyList();
+      tables_ = java.util.Collections.emptyList();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      for (int i = 0; i < getServersCount(); i++) {
+        if (!getServers(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      for (int i = 0; i < getTablesCount(); i++) {
+        if (!getTables(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getNameBytes());
+      }
+      for (int i = 0; i < tables_.size(); i++) {
+        output.writeMessage(3, tables_.get(i));
+      }
+      for (int i = 0; i < servers_.size(); i++) {
+        output.writeMessage(4, servers_.get(i));
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getNameBytes());
+      }
+      for (int i = 0; i < tables_.size(); i++) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(3, tables_.get(i));
+      }
+      for (int i = 0; i < servers_.size(); i++) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(4, servers_.get(i));
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) obj;
+
+      boolean result = true;
+      result = result && (hasName() == other.hasName());
+      if (hasName()) {
+        result = result && getName()
+            .equals(other.getName());
+      }
+      result = result && getServersList()
+          .equals(other.getServersList());
+      result = result && getTablesList()
+          .equals(other.getTablesList());
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasName()) {
+        hash = (37 * hash) + NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getName().hashCode();
+      }
+      if (getServersCount() > 0) {
+        hash = (37 * hash) + SERVERS_FIELD_NUMBER;
+        hash = (53 * hash) + getServersList().hashCode();
+      }
+      if (getTablesCount() > 0) {
+        hash = (37 * hash) + TABLES_FIELD_NUMBER;
+        hash = (53 * hash) + getTablesList().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GroupInfo}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_GroupInfo_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_GroupInfo_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getServersFieldBuilder();
+          getTablesFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        name_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (serversBuilder_ == null) {
+          servers_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000002);
+        } else {
+          serversBuilder_.clear();
+        }
+        if (tablesBuilder_ == null) {
+          tables_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000004);
+        } else {
+          tablesBuilder_.clear();
+        }
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_GroupInfo_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo build() {
+        org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.name_ = name_;
+        if (serversBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002)) {
+            servers_ = java.util.Collections.unmodifiableList(servers_);
+            bitField0_ = (bitField0_ & ~0x00000002);
+          }
+          result.servers_ = servers_;
+        } else {
+          result.servers_ = serversBuilder_.build();
+        }
+        if (tablesBuilder_ == null) {
+          if (((bitField0_ & 0x00000004) == 0x00000004)) {
+            tables_ = java.util.Collections.unmodifiableList(tables_);
+            bitField0_ = (bitField0_ & ~0x00000004);
+          }
+          result.tables_ = tables_;
+        } else {
+          result.tables_ = tablesBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) return this;
+        if (other.hasName()) {
+          bitField0_ |= 0x00000001;
+          name_ = other.name_;
+          onChanged();
+        }
+        if (serversBuilder_ == null) {
+          if (!other.servers_.isEmpty()) {
+            if (servers_.isEmpty()) {
+              servers_ = other.servers_;
+              bitField0_ = (bitField0_ & ~0x00000002);
+            } else {
+              ensureServersIsMutable();
+              servers_.addAll(other.servers_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.servers_.isEmpty()) {
+            if (serversBuilder_.isEmpty()) {
+              serversBuilder_.dispose();
+              serversBuilder_ = null;
+              servers_ = other.servers_;
+              bitField0_ = (bitField0_ & ~0x00000002);
+              serversBuilder_ = 
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getServersFieldBuilder() : null;
+            } else {
+              serversBuilder_.addAllMessages(other.servers_);
+            }
+          }
+        }
+        if (tablesBuilder_ == null) {
+          if (!other.tables_.isEmpty()) {
+            if (tables_.isEmpty()) {
+              tables_ = other.tables_;
+              bitField0_ = (bitField0_ & ~0x00000004);
+            } else {
+              ensureTablesIsMutable();
+              tables_.addAll(other.tables_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.tables_.isEmpty()) {
+            if (tablesBuilder_.isEmpty()) {
+              tablesBuilder_.dispose();
+              tablesBuilder_ = null;
+              tables_ = other.tables_;
+              bitField0_ = (bitField0_ & ~0x00000004);
+              tablesBuilder_ = 
+                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+                   getTablesFieldBuilder() : null;
+            } else {
+              tablesBuilder_.addAllMessages(other.tables_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasName()) {
+          return false;
+        }
+        for (int i = 0; i < getServersCount(); i++) {
+          if (!getServers(i).isInitialized()) {
+            return false;
+          }
+        }
+        for (int i = 0; i < getTablesCount(); i++) {
+          if (!getTables(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string name = 1;
+      private java.lang.Object name_ = "";
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public boolean hasName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public java.lang.String getName() {
+        java.lang.Object ref = name_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          name_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getNameBytes() {
+        java.lang.Object ref = name_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          name_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public Builder setName(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        name_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public Builder clearName() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        name_ = getDefaultInstance().getName();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string name = 1;</code>
+       */
+      public Builder setNameBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        name_ = value;
+        onChanged();
+        return this;
+      }
+
+      // repeated .hbase.pb.HostPort servers = 4;
+      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort> servers_ =
+        java.util.Collections.emptyList();
+      private void ensureServersIsMutable() {
+        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+          servers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort>(servers_);
+          bitField0_ |= 0x00000002;
+         }
+      }
+
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> serversBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort> getServersList() {
+        if (serversBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(servers_);
+        } else {
+          return serversBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public int getServersCount() {
+        if (serversBuilder_ == null) {
+          return servers_.size();
+        } else {
+          return serversBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) {
+        if (serversBuilder_ == null) {
+          return servers_.get(index);
+        } else {
+          return serversBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder setServers(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) {
+        if (serversBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServersIsMutable();
+          servers_.set(index, value);
+          onChanged();
+        } else {
+          serversBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder setServers(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) {
+        if (serversBuilder_ == null) {
+          ensureServersIsMutable();
+          servers_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          serversBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) {
+        if (serversBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServersIsMutable();
+          servers_.add(value);
+          onChanged();
+        } else {
+          serversBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder addServers(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) {
+        if (serversBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServersIsMutable();
+          servers_.add(index, value);
+          onChanged();
+        } else {
+          serversBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder addServers(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) {
+        if (serversBuilder_ == null) {
+          ensureServersIsMutable();
+          servers_.add(builderForValue.build());
+          onChanged();
+        } else {
+          serversBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder addServers(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) {
+        if (serversBuilder_ == null) {
+          ensureServersIsMutable();
+          servers_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          serversBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder addAllServers(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort> values) {
+        if (serversBuilder_ == null) {
+          ensureServersIsMutable();
+          super.addAll(values, servers_);
+          onChanged();
+        } else {
+          serversBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder clearServers() {
+        if (serversBuilder_ == null) {
+          servers_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000002);
+          onChanged();
+        } else {
+          serversBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public Builder removeServers(int index) {
+        if (serversBuilder_ == null) {
+          ensureServersIsMutable();
+          servers_.remove(index);
+          onChanged();
+        } else {
+          serversBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder getServersBuilder(
+          int index) {
+        return getServersFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder(
+          int index) {
+        if (serversBuilder_ == null) {
+          return servers_.get(index);
+        } else {
+          return serversBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> 
+           getServersOrBuilderList() {
+        if (serversBuilder_ != null) {
+          return serversBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(servers_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder() {
+        return getServersFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder(
+          int index) {
+        return getServersFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.HostPort servers = 4;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder> 
+           getServersBuilderList() {
+        return getServersFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> 
+          getServersFieldBuilder() {
+        if (serversBuilder_ == null) {
+          serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder>(
+                  servers_,
+                  ((bitField0_ & 0x00000002) == 0x00000002),
+                  getParentForChildren(),
+                  isClean());
+          servers_ = null;
+        }
+        return serversBuilder_;
+      }
+
+      // repeated .hbase.pb.TableName tables = 3;
+      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tables_ =
+        java.util.Collections.emptyList();
+      private void ensureTablesIsMutable() {
+        if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+          tables_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>(tables_);
+          bitField0_ |= 0x00000004;
+         }
+      }
+
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTablesList() {
+        if (tablesBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(tables_);
+        } else {
+          return tablesBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public int getTablesCount() {
+        if (tablesBuilder_ == null) {
+          return tables_.size();
+        } else {
+          return tablesBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) {
+        if (tablesBuilder_ == null) {
+          return tables_.get(index);
+        } else {
+          return tablesBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder setTables(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tablesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTablesIsMutable();
+          tables_.set(index, value);
+          onChanged();
+        } else {
+          tablesBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder setTables(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tablesBuilder_ == null) {
+          ensureTablesIsMutable();
+          tables_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          tablesBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tablesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTablesIsMutable();
+          tables_.add(value);
+          onChanged();
+        } else {
+          tablesBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder addTables(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+        if (tablesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTablesIsMutable();
+          tables_.add(index, value);
+          onChanged();
+        } else {
+          tablesBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder addTables(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tablesBuilder_ == null) {
+          ensureTablesIsMutable();
+          tables_.add(builderForValue.build());
+          onChanged();
+        } else {
+          tablesBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder addTables(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tablesBuilder_ == null) {
+          ensureTablesIsMutable();
+          tables_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          tablesBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder addAllTables(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> values) {
+        if (tablesBuilder_ == null) {
+          ensureTablesIsMutable();
+          super.addAll(values, tables_);
+          onChanged();
+        } else {
+          tablesBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder clearTables() {
+        if (tablesBuilder_ == null) {
+          tables_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000004);
+          onChanged();
+        } else {
+          tablesBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public Builder removeTables(int index) {
+        if (tablesBuilder_ == null) {
+          ensureTablesIsMutable();
+          tables_.remove(index);
+          onChanged();
+        } else {
+          tablesBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder(
+          int index) {
+        return getTablesFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder(
+          int index) {
+        if (tablesBuilder_ == null) {
+          return tables_.get(index);
+        } else {
+          return tablesBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+           getTablesOrBuilderList() {
+        if (tablesBuilder_ != null) {
+          return tablesBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(tables_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() {
+        return getTablesFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder(
+          int index) {
+        return getTablesFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.TableName tables = 3;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder> 
+           getTablesBuilderList() {
+        return getTablesFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+          getTablesFieldBuilder() {
+        if (tablesBuilder_ == null) {
+          tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+                  tables_,
+                  ((bitField0_ & 0x00000004) == 0x00000004),
+                  getParentForChildren(),
+                  isClean());
+          tables_ = null;
+        }
+        return tablesBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GroupInfo)
+    }
+
+    static {
+      defaultInstance = new GroupInfo(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GroupInfo)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GroupInfo_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_GroupInfo_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\rRSGroup.proto\022\010hbase.pb\032\013HBase.proto\"c" +
+      "\n\tGroupInfo\022\014\n\004name\030\001 \002(\t\022#\n\007servers\030\004 \003" +
+      "(\0132\022.hbase.pb.HostPort\022#\n\006tables\030\003 \003(\0132\023" +
+      ".hbase.pb.TableNameBC\n*org.apache.hadoop" +
+      ".hbase.protobuf.generatedB\rRSGroupProtos" +
+      "H\001\210\001\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_hbase_pb_GroupInfo_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_hbase_pb_GroupInfo_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_GroupInfo_descriptor,
+              new java.lang.String[] { "Name", "Servers", "Tables", });
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
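
For readers skimming the generated class above, a minimal sketch of the
builder API it exposes. The GroupInfo methods (setName, addServers,
addTables, build) are all visible in the diff; the TableName
namespace/qualifier byte setters are assumed from the stock HBase.proto,
which this diff does not show:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;

public class BuildGroupInfo {
  public static void main(String[] args) {
    // Hypothetical server and table; any host/port and table name would do.
    HBaseProtos.HostPort server = HBaseProtos.HostPort.newBuilder()
        .setHostName("rs1.example.com")
        .setPort(16020)
        .build();
    HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();
    RSGroupProtos.GroupInfo info = RSGroupProtos.GroupInfo.newBuilder()
        .setName("group_a")  // required; build() enforces its presence
        .addServers(server)
        .addTables(table)
        .build();
    System.out.println(info.getName() + " -> " + info.getServersCount()
        + " server(s), " + info.getTablesCount() + " table(s)");
  }
}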

http://git-wip-us.apache.org/repos/asf/hbase/blob/16f65bad/hbase-protocol/src/main/protobuf/HBase.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index d09598a..cd0d083 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -237,3 +237,9 @@ message NamespaceDescriptor {
 message RegionServerInfo {
   optional int32 infoPort = 1;
 }
+
+message HostPort {
+  required string host_name = 1;
+  required uint32 port = 2;
+}
+
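
Both HostPort fields are required, so the protobuf 2.5 generated builder
refuses to produce a half-filled message. A small sketch of that contract:

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class HostPortCheck {
  public static void main(String[] args) {
    HBaseProtos.HostPort.Builder b =
        HBaseProtos.HostPort.newBuilder().setHostName("rs1.example.com");
    System.out.println(b.isInitialized());  // false: required port is unset
    try {
      b.build();  // throws because a required field is missing
    } catch (com.google.protobuf.UninitializedMessageException e) {
      System.out.println("rejected: " + e.getMessage());
    }
    HBaseProtos.HostPort hp = b.setPort(16020).build();  // now succeeds
    System.out.println(hp.getHostName() + ":" + hp.getPort());
  }
}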

http://git-wip-us.apache.org/repos/asf/hbase/blob/16f65bad/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 10a7854..3e58492 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -31,6 +31,7 @@ import "Client.proto";
 import "ClusterStatus.proto";
 import "ErrorHandling.proto";
 import "Quota.proto";
+import "RSGroup.proto";
 
 /* Column-level protobufs */
 
@@ -450,6 +451,92 @@ message MajorCompactionTimestampResponse {
   required int64 compaction_timestamp = 1;
 }
 
+/** Group level protobufs */
+
+message ListTablesOfGroupRequest {
+  required string group_name = 1;
+}
+
+message ListTablesOfGroupResponse {
+  repeated TableName table_name = 1;
+}
+
+message GetGroupInfoRequest {
+  required string group_name = 1;
+}
+
+message GetGroupInfoResponse {
+  optional GroupInfo group_info = 1;
+}
+
+message GetGroupInfoOfTableRequest {
+  required TableName table_name = 1;
+}
+
+message GetGroupInfoOfTableResponse {
+  optional GroupInfo group_info = 1;
+}
+
+message MoveServersRequest {
+  required string target_group = 1;
+  repeated HostPort servers = 2;
+}
+
+message MoveServersResponse {
+}
+
+message MoveTablesRequest {
+  required string target_group = 1;
+  repeated TableName table_name = 2;
+}
+
+message MoveTablesResponse {
+}
+
+message AddGroupRequest {
+  required string group_name = 1;
+}
+
+message AddGroupResponse {
+}
+
+message RemoveGroupRequest {
+  required string group_name = 1;
+}
+
+message RemoveGroupResponse {
+}
+
+message BalanceGroupRequest {
+  required string group_name = 1;
+}
+
+message BalanceGroupResponse {
+  required bool balanceRan = 1;
+}
+
+message ListGroupInfosRequest {
+}
+
+message ListGroupInfosResponse {
+  repeated GroupInfo group_info = 1;
+}
+
+message GetGroupInfoOfServerRequest {
+  required HostPort server = 1;
+}
+
+message GetGroupInfoOfServerResponse {
+  optional GroupInfo group_info = 1;
+}
+
+message ListServersInTransitionRequest {
+}
+
+message ListServersInTransitionResponse {
+  repeated NameStringPair transitions = 1;
+}
+
 service MasterService {
   /** Used by the client to get the number of regions that have received the updated schema */
   rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@@ -681,4 +768,31 @@ service MasterService {
 
   rpc getProcedureResult(GetProcedureResultRequest)
     returns(GetProcedureResultResponse);
+
+  rpc GetGroupInfo(GetGroupInfoRequest)
+    returns (GetGroupInfoResponse);
+
+  rpc GetGroupInfoOfTable(GetGroupInfoOfTableRequest)
+    returns (GetGroupInfoOfTableResponse);
+
+  rpc GetGroupInfoOfServer(GetGroupInfoOfServerRequest)
+    returns (GetGroupInfoOfServerResponse);
+
+  rpc MoveServers(MoveServersRequest)
+    returns (MoveServersResponse);
+
+  rpc MoveTables(MoveTablesRequest)
+    returns (MoveTablesResponse);
+
+  rpc AddGroup(AddGroupRequest)
+    returns (AddGroupResponse);
+
+  rpc RemoveGroup(RemoveGroupRequest)
+    returns (RemoveGroupResponse);
+
+  rpc BalanceGroup(BalanceGroupRequest)
+    returns (BalanceGroupResponse);
+
+  rpc ListGroupInfos(ListGroupInfosRequest)
+    returns (ListGroupInfosResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/16f65bad/hbase-protocol/src/main/protobuf/RSGroup.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/RSGroup.proto b/hbase-protocol/src/main/protobuf/RSGroup.proto
new file mode 100644
index 0000000..920313c
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/RSGroup.proto
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "RSGroupProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+message GroupInfo {
+  required string name = 1;
+  repeated HostPort servers = 4;
+  repeated TableName tables = 3;
+}
+
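
Note that the field numbers run out of declaration order (tables = 3,
servers = 4); only the tag numbers matter on the wire, which is why the
generated writeTo() above emits tables before servers. A quick round-trip
sketch, relying on the java_generate_equals_and_hash option set above:

import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;

public class GroupInfoRoundTrip {
  public static void main(String[] args) throws Exception {
    RSGroupProtos.GroupInfo info = RSGroupProtos.GroupInfo.newBuilder()
        .setName("group_a")
        .build();
    byte[] wire = info.toByteArray();  // serialized by tag number
    RSGroupProtos.GroupInfo back = RSGroupProtos.GroupInfo.parseFrom(wire);
    System.out.println(back.equals(info));  // true
  }
}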

http://git-wip-us.apache.org/repos/asf/hbase/blob/16f65bad/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index 8cf5954..d11ee38 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -21,12 +21,14 @@ package org.apache.hadoop.hbase.coprocessor;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HostPort;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -595,4 +597,55 @@ public abstract class BaseMasterAndRegionObserver extends BaseRegionObserver
   public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final String namespace, final Quotas quotas) throws IOException {
   }
+
+  @Override
+  public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                             Set<HostPort> servers, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                              Set<HostPort> servers, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                            Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                             Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void preAddGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                          String name) throws IOException {
+  }
+
+  @Override
+  public void postAddGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                           String name) throws IOException {
+  }
+
+  @Override
+  public void preRemoveGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                             String name) throws IOException {
+  }
+
+  @Override
+  public void postRemoveGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                              String name) throws IOException {
+  }
+
+  @Override
+  public void preBalanceGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                              String groupName) throws IOException {
+  }
+
+  @Override
+  public void postBalanceGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                               String groupName, boolean balancerRan) throws IOException {
+  }
 }
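
Because every hook above is a no-op, a coprocessor only needs to override
the methods it cares about. A hedged sketch, not part of the patch: an
observer that vetoes server moves into a protected group ("critical" is a
hypothetical group name; a real policy would also check the calling user):

    // assumed imports: java.io.IOException, java.util.Set,
    //   org.apache.hadoop.hbase.HostPort and the coprocessor classes above
    public class ProtectedGroupObserver extends BaseMasterAndRegionObserver {
      @Override
      public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
                                 Set<HostPort> servers, String targetGroup) throws IOException {
        // Throwing here aborts the move before GroupAdminServer touches any state.
        if ("critical".equals(targetGroup)) {
          throw new IOException("Moves into group 'critical' are not allowed");
        }
      }
    }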

http://git-wip-us.apache.org/repos/asf/hbase/blob/16f65bad/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 23a098c..95885d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -21,12 +21,14 @@ package org.apache.hadoop.hbase.coprocessor;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HostPort;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -588,4 +590,54 @@ public class BaseMasterObserver implements MasterObserver {
   public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final String namespace, final Quotas quotas) throws IOException {
   }
+
+  @Override
+  public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                             Set<HostPort> servers, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                              Set<HostPort> servers, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                            Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                             Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void preAddGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                          String name) throws IOException {
+  }
+
+  @Override
+  public void postAddGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                           String name) throws IOException {
+  }
+
+  @Override
+  public void preRemoveGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                             String name) throws IOException {
+  }
+
+  @Override
+  public void postRemoveGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                              String name) throws IOException {
+  }
+
+  @Override
+  public void preBalanceGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                              String groupName) throws IOException {
+  }
+
+  @Override
+  public void postBalanceGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+                               String groupName, boolean balancerRan) throws IOException {
+  }
 }
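
The same pattern applies here: subclass and override only what you need.
A hedged sketch, not part of the patch, that audits group lifecycle events:

    // assumed imports: commons-logging Log/LogFactory plus the classes above
    public class GroupAuditObserver extends BaseMasterObserver {
      private static final Log LOG = LogFactory.getLog(GroupAuditObserver.class);

      @Override
      public void postAddGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
                               String name) throws IOException {
        LOG.info("Region server group added: " + name);
      }

      @Override
      public void postRemoveGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
                                  String name) throws IOException {
        LOG.info("Region server group removed: " + name);
      }
    }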

http://git-wip-us.apache.org/repos/asf/hbase/blob/16f65bad/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 3246d96..63202cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -21,12 +21,14 @@ package org.apache.hadoop.hbase.coprocessor;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HostPort;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -1180,4 +1182,98 @@ public interface MasterObserver extends Coprocessor {
    */
   void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final String namespace, final Quotas quotas) throws IOException;
+
+  /**
+   * Called before servers are moved to target region server group
+   * @param ctx the environment to interact with the framework and master
+   * @param servers set of servers to move
+   * @param targetGroup destination group
+   * @throws IOException
+   */
+  void preMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                      Set<HostPort> servers, String targetGroup) throws IOException;
+
+  /**
+   * Called after servers are moved to target region server group
+   * @param ctx the environment to interact with the framework and master
+   * @param servers set of servers that were moved
+   * @param targetGroup destination group
+   * @throws IOException
+   */
+  void postMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                       Set<HostPort> servers, String targetGroup) throws IOException;
+
+  /**
+   * Called before tables are moved to target region server group
+   * @param ctx the environment to interact with the framework and master
+   * @param tables set of tables to move
+   * @param targetGroup destination group
+   * @throws IOException
+   */
+  void preMoveTables(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                     Set<TableName> tables, String targetGroup) throws IOException;
+
+  /**
+   * Called after tables are moved to target region server group
+   * @param ctx the environment to interact with the framework and master
+   * @param tables set of tables that were moved
+   * @param targetGroup destination group
+   * @throws IOException
+   */
+  void postMoveTables(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                      Set<TableName> tables, String targetGroup) throws IOException;
+
+  /**
+   * Called before a new region server group is added
+   * @param ctx the environment to interact with the framework and master
+   * @param name group name
+   * @throws IOException
+   */
+  void preAddGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                   String name) throws IOException;
+
+  /**
+   * Called after a new region server group is added
+   * @param ctx the environment to interact with the framework and master
+   * @param name group name
+   * @throws IOException
+   */
+  void postAddGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                    String name) throws IOException;
+
+  /**
+   * Called before a region server group is removed
+   * @param ctx the environment to interact with the framework and master
+   * @param name group name
+   * @throws IOException
+   */
+  void preRemoveGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                      String name) throws IOException;
+
+  /**
+   * Called after a region server group is removed
+   * @param ctx the environment to interact with the framework and master
+   * @param name group name
+   * @throws IOException
+   */
+  void postRemoveGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                       String name) throws IOException;
+
+  /**
+   * Called before the regions of a region server group are rebalanced
+   * @param ctx the environment to interact with the framework and master
+   * @param groupName group name
+   * @throws IOException
+   */
+  void preBalanceGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                       String groupName) throws IOException;
+
+  /**
+   * Called after the regions of a region server group have been rebalanced
+   * @param ctx the environment to interact with the framework and master
+   * @param groupName group name
+   * @param balancerRan true if region plans were actually executed
+   * @throws IOException
+   */
+  void postBalanceGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+                        String groupName, boolean balancerRan) throws IOException;
 }
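
An observer implementing these hooks is wired in through the standard
master coprocessor mechanism. A hedged sketch for a test setting; in
production the class name goes into hbase.coprocessor.master.classes in
hbase-site.xml (ProtectedGroupObserver is the hypothetical class sketched
earlier):

    Configuration conf = HBaseConfiguration.create();
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        ProtectedGroupObserver.class.getName());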

http://git-wip-us.apache.org/repos/asf/hbase/blob/16f65bad/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java
new file mode 100644
index 0000000..a0ab98f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java
@@ -0,0 +1,486 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HostPort;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.metrics.util.MBeanUtil;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Service to support Region Server Grouping (HBASE-6721)
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class GroupAdminServer implements GroupAdmin {
+  private static final Log LOG = LogFactory.getLog(GroupAdminServer.class);
+
+  private MasterServices master;
+  //List of servers that are being moved from one group to another
+  //Key=host:port, Value=targetGroup
+  ConcurrentMap<HostPort, String> serversInTransition =
+      new ConcurrentHashMap<HostPort, String>();
+  private GroupInfoManagerImpl groupInfoManager;
+
+  public GroupAdminServer(MasterServices master) throws IOException {
+    this.master = master;
+    groupInfoManager = new GroupInfoManagerImpl(master);
+    registerMBean();
+  }
+
+  @Override
+  public GroupInfo getGroupInfo(String groupName) throws IOException {
+    return getGroupInfoManager().getGroup(groupName);
+  }
+
+  @Override
+  public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException {
+    String groupName = getGroupInfoManager().getGroupOfTable(tableName);
+    if (groupName == null) {
+      if(master.getTableDescriptors().get(tableName) == null) {
+        throw new ConstraintException("Table "+tableName+" does not exist");
+      }
+      throw new ConstraintException("Table "+tableName+" has no group");
+    }
+    return getGroupInfoManager().getGroup(groupName);
+  }
+
+  @Override
+  public void moveServers(Set<HostPort> servers, String targetGroupName)
+      throws IOException {
+    if (servers == null) {
+      throw new DoNotRetryIOException(
+          "The list of servers cannot be null.");
+    }
+    if (StringUtils.isEmpty(targetGroupName)) {
+      throw new DoNotRetryIOException("The target group cannot be null.");
+    }
+    if(servers.size() < 1) {
+      return;
+    }
+
+    GroupInfo targetGrp = getGroupInfo(targetGroupName);
+    GroupInfoManager manager = getGroupInfoManager();
+    synchronized (manager) {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preMoveServers(servers, targetGroupName);
+      }
+      //we only allow a move from a single source group
+      //so this should be ok
+      GroupInfo srcGrp = manager.getGroupOfServer(servers.iterator().next());
+      //only move online servers (from default)
+      //or servers from other groups
+      //this prevents bogus servers from entering groups
+      if(GroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) {
+        Set<HostPort> onlineServers = new HashSet<HostPort>();
+        for(ServerName server: master.getServerManager().getOnlineServers().keySet()) {
+          onlineServers.add(server.getHostPort());
+        }
+        for(HostPort el: servers) {
+          if(!onlineServers.contains(el)) {
+            throw new DoNotRetryIOException(
+                "Server "+el+" is not an online server in default group.");
+          }
+        }
+      }
+
+      if(srcGrp.getServers().size() <= servers.size() &&
+          srcGrp.getTables().size() > 0) {
+        throw new DoNotRetryIOException("Cannot leave a group "+srcGrp.getName()+
+            " that contains tables " +"without servers.");
+      }
+
+      String sourceGroupName = srcGrp.getName();
+      if(getGroupInfo(targetGroupName) == null) {
+        throw new ConstraintException("Target group does not exist: "+targetGroupName);
+      }
+
+      for(HostPort server: servers) {
+        if (serversInTransition.containsKey(server)) {
+          throw new DoNotRetryIOException(
+              "Server list contains a server that is already being moved: "+server);
+        }
+        String tmpGroup = getGroupInfoManager().getGroupOfServer(server).getName();
+        if (sourceGroupName != null && !tmpGroup.equals(sourceGroupName)) {
+          throw new DoNotRetryIOException(
+              "Move server request should only come from one source group. "+
+              "Expecting only "+sourceGroupName+" but contains "+tmpGroup);
+        }
+      }
+
+      if(sourceGroupName.equals(targetGroupName)) {
+        throw new ConstraintException(
+            "Target group is the same as source group: "+targetGroupName);
+      }
+
+      //update the servers as in transition
+      for(HostPort server: servers) {
+        serversInTransition.put(server, targetGroupName);
+      }
+
+      getGroupInfoManager().moveServers(servers, sourceGroupName, targetGroupName);
+      boolean found;
+      List<HostPort> tmpServers = Lists.newArrayList(servers);
+      do {
+        found = false;
+        for(Iterator<HostPort> iter = tmpServers.iterator();
+            iter.hasNext(); ) {
+          HostPort rs = iter.next();
+          //get online regions
+          List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
+          for(Map.Entry<HRegionInfo, ServerName> el:
+              master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
+            if (el.getValue().getHostPort().equals(rs)) {
+              regions.add(el.getKey());
+            }
+          }
+          for(RegionState state :
+              master.getAssignmentManager().getRegionStates().getRegionsInTransition().values()) {
+            if (state.getServerName().getHostPort().equals(rs)) {
+              regions.add(state.getRegion());
+            }
+          }
+
+          //unassign regions for a server
+          LOG.info("Unassigning "+regions.size()+
+              " regions from server "+rs+" for move to "+targetGroupName);
+          if(regions.size() > 0) {
+            //TODO bulk unassign or throttled unassign?
+            for(HRegionInfo region: regions) {
+              //regions might get assigned from tables of target group
+              //so we need to filter
+              if(!targetGrp.containsTable(region.getTable())) {
+                master.getAssignmentManager().unassign(region);
+                found = true;
+              }
+            }
+          }
+          if(!found) {
+            iter.remove();
+            serversInTransition.remove(rs);
+          }
+        }
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          LOG.warn("Sleep interrupted", e);
+        }
+      } while(found);
+
+      LOG.info("Move server done: "+sourceGroupName+"->"+targetGroupName);
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postMoveServers(servers, targetGroupName);
+      }
+    }
+  }
+
+  @Override
+  public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
+    if (tables == null) {
+      throw new ConstraintException(
+          "The list of servers cannot be null.");
+    }
+    if(tables.size() < 1) {
+      LOG.debug("moveTables() passed an empty set. Ignoring.");
+      return;
+    }
+    GroupInfoManager manager = getGroupInfoManager();
+    synchronized (manager) {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preMoveTables(tables, targetGroup);
+      }
+
+      if(targetGroup != null) {
+        GroupInfo destGroup = manager.getGroup(targetGroup);
+        if(destGroup == null) {
+          throw new ConstraintException("Target group does not exist: "+targetGroup);
+        }
+        if(destGroup.getServers().size() < 1) {
+          throw new ConstraintException("Target group must have at least one server.");
+        }
+      }
+
+      for(TableName table : tables) {
+        String srcGroup = manager.getGroupOfTable(table);
+        if(srcGroup != null && srcGroup.equals(targetGroup)) {
+          throw new ConstraintException("Source group is the same as target group for table "+table+" :"+srcGroup);
+        }
+      }
+      manager.moveTables(tables, targetGroup);
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postMoveTables(tables, targetGroup);
+      }
+    }
+    for(TableName table: tables) {
+      for(HRegionInfo region:
+          master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) {
+        master.getAssignmentManager().unassign(region);
+      }
+    }
+  }
+
+  @Override
+  public void addGroup(String name) throws IOException {
+    if (master.getMasterCoprocessorHost() != null) {
+      master.getMasterCoprocessorHost().preAddGroup(name);
+    }
+    getGroupInfoManager().addGroup(new GroupInfo(name));
+    if (master.getMasterCoprocessorHost() != null) {
+      master.getMasterCoprocessorHost().postAddGroup(name);
+    }
+  }
+
+  @Override
+  public void removeGroup(String name) throws IOException {
+    GroupInfoManager manager = getGroupInfoManager();
+    synchronized (manager) {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preRemoveGroup(name);
+      }
+      GroupInfo groupInfo = getGroupInfoManager().getGroup(name);
+      if(groupInfo == null) {
+        throw new DoNotRetryIOException("Group "+name+" does not exist");
+      }
+      int tableCount = groupInfo.getTables().size();
+      if (tableCount > 0) {
+        throw new DoNotRetryIOException("Group "+name+" must have no associated tables: "+tableCount);
+      }
+      int serverCount = groupInfo.getServers().size();
+      if(serverCount > 0) {
+        throw new DoNotRetryIOException("Group "+name+" must have no associated servers: "+serverCount);
+      }
+      for(NamespaceDescriptor ns: master.listNamespaceDescriptors()) {
+        String nsGroup = ns.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP);
+        if(nsGroup != null &&  nsGroup.equals(name)) {
+          throw new DoNotRetryIOException("Group "+name+" is referenced by namespace: "+ns.getName());
+        }
+      }
+      manager.removeGroup(name);
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postRemoveGroup(name);
+      }
+    }
+  }
+
+  @Override
+  public boolean balanceGroup(String groupName) throws IOException {
+    ServerManager serverManager = master.getServerManager();
+    AssignmentManager assignmentManager = master.getAssignmentManager();
+    LoadBalancer balancer = master.getLoadBalancer();
+
+    boolean balancerRan;
+    synchronized (balancer) {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preBalanceGroup(groupName);
+      }
+      // Only allow one balance run at a time.
+      Map<String, RegionState> groupRIT = groupGetRegionsInTransition(groupName);
+      if (groupRIT.size() > 0) {
+        LOG.debug("Not running balancer because " +
+          groupRIT.size() +
+          " region(s) in transition: " +
+          StringUtils.abbreviate(
+              master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(),
+              256));
+        return false;
+      }
+      if (serverManager.areDeadServersInProgress()) {
+        LOG.debug("Not running balancer because processing dead regionserver(s): " +
+            serverManager.getDeadServers());
+        return false;
+      }
+
+      //We balance per group instead of per table
+      List<RegionPlan> plans = new ArrayList<RegionPlan>();
+      for(Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> tableMap:
+          getGroupAssignmentsByTable(groupName).entrySet()) {
+        LOG.info("Creating partial plan for table "+tableMap.getKey()+": "+tableMap.getValue());
+        List<RegionPlan> partialPlans = balancer.balanceCluster(tableMap.getValue());
+        LOG.info("Partial plan for table "+tableMap.getKey()+": "+partialPlans);
+        if (partialPlans != null) {
+          plans.addAll(partialPlans);
+        }
+      }
+      long startTime = System.currentTimeMillis();
+      balancerRan = !plans.isEmpty();
+      if (balancerRan) {
+        LOG.info("Group balance "+groupName+" starting with plan count: "+plans.size());
+        for (RegionPlan plan: plans) {
+          LOG.info("balance " + plan);
+          assignmentManager.balance(plan);
+        }
+        LOG.info("Group balance "+groupName+" completed after "+(System.currentTimeMillis()-startTime)+" seconds");
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postBalanceGroup(groupName, balancerRan);
+      }
+    }
+    return balancerRan;
+  }
+
+  @Override
+  public List<GroupInfo> listGroups() throws IOException {
+    return getGroupInfoManager().listGroups();
+  }
+
+  @Override
+  public GroupInfo getGroupOfServer(HostPort hostPort) throws IOException {
+    return getGroupInfoManager().getGroupOfServer(hostPort);
+  }
+
+  @InterfaceAudience.Private
+  public GroupInfoManager getGroupInfoManager() throws IOException {
+    return groupInfoManager;
+  }
+
+  private Map<String, RegionState> groupGetRegionsInTransition(String groupName)
+      throws IOException {
+    Map<String, RegionState> rit = Maps.newTreeMap();
+    AssignmentManager am = master.getAssignmentManager();
+    GroupInfo groupInfo = getGroupInfo(groupName);
+    for(TableName tableName : groupInfo.getTables()) {
+      for(HRegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) {
+        RegionState state =
+            master.getAssignmentManager().getRegionStates().getRegionTransitionState(regionInfo);
+        if(state != null) {
+          rit.put(regionInfo.getEncodedName(), state);
+        }
+      }
+    }
+    return rit;
+  }
+
+  private Map<TableName, Map<ServerName, List<HRegionInfo>>>
+      getGroupAssignmentsByTable(String groupName) throws IOException {
+    Map<TableName, Map<ServerName, List<HRegionInfo>>> result = Maps.newHashMap();
+    GroupInfo groupInfo = getGroupInfo(groupName);
+    Map<TableName, Map<ServerName, List<HRegionInfo>>> assignments = Maps.newHashMap();
+    for(Map.Entry<HRegionInfo, ServerName> entry:
+        master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
+      TableName currTable = entry.getKey().getTable();
+      ServerName currServer = entry.getValue();
+      HRegionInfo currRegion = entry.getKey();
+      if(groupInfo.getTables().contains(currTable)) {
+        if(!assignments.containsKey(currTable)) {
+          assignments.put(currTable, new HashMap<ServerName, List<HRegionInfo>>());
+        }
+        if(!assignments.get(currTable).containsKey(currServer)) {
+          assignments.get(currTable).put(currServer, new ArrayList<HRegionInfo>());
+        }
+        assignments.get(currTable).get(currServer).add(currRegion);
+      }
+    }
+
+    Map<ServerName, List<HRegionInfo>> serverMap = Maps.newHashMap();
+    for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) {
+      if(groupInfo.getServers().contains(serverName.getHostPort())) {
+        serverMap.put(serverName, Collections.<HRegionInfo>emptyList());
+      }
+    }
+
+    //add all tables that are members of the group
+    for(TableName tableName : groupInfo.getTables()) {
+      if(assignments.containsKey(tableName)) {
+        result.put(tableName, new HashMap<ServerName, List<HRegionInfo>>());
+        result.get(tableName).putAll(serverMap);
+        result.get(tableName).putAll(assignments.get(tableName));
+        LOG.debug("Adding assignments for "+tableName+": "+assignments.get(tableName));
+      }
+    }
+
+    return result;
+  }
+
+  void registerMBean() {
+    MXBeanImpl mxBeanInfo = MXBeanImpl.init(this, master);
+    MBeanUtil.registerMBean("Group", "Group", mxBeanInfo);
+    LOG.info("Registered Group MXBean");
+  }
+
+  public void prepareGroupForTable(HTableDescriptor desc) throws IOException {
+    String groupName =
+        master.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString())
+                .getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP);
+    if (groupName == null) {
+      groupName = GroupInfo.DEFAULT_GROUP;
+    }
+    GroupInfo groupInfo = getGroupInfo(groupName);
+    if (groupInfo == null) {
+      throw new ConstraintException("Group " + groupName + " does not exist.");
+    }
+    if (!groupInfo.containsTable(desc.getTableName())) {
+      LOG.debug("Pre-moving table " + desc.getTableName() + " to group " + groupName);
+      moveTables(Sets.newHashSet(desc.getTableName()), groupName);
+    }
+  }
+
+  public void cleanupGroupForTable(TableName tableName) throws IOException {
+    try {
+      GroupInfo group = getGroupInfoOfTable(tableName);
+      if (group != null) {
+        LOG.debug("Removing deleted table from table group " + group.getName());
+        moveTables(Sets.newHashSet(tableName), null);
+      }
+    } catch (IOException ex) {
+      //ConstraintException is an IOException, so a single catch covers both cases
+      LOG.debug("Failed to perform group information cleanup for table: " + tableName, ex);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+}
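
Putting the pieces together, a hedged sketch of the admin call sequence
GroupAdminServer supports, assuming a MasterServices handle and a
HostPort(host, port) constructor (not shown in this patch); all names are
illustrative:

    GroupAdminServer admin = new GroupAdminServer(master);
    admin.addGroup("batch");
    admin.moveServers(Sets.newHashSet(new HostPort("rs1.example.com", 16020)), "batch");
    admin.moveTables(Sets.newHashSet(TableName.valueOf("t1")), "batch");
    admin.balanceGroup("batch");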