Posted to commits@hbase.apache.org by zh...@apache.org on 2019/10/28 11:37:25 UTC

[hbase] branch HBASE-22514 updated (efb4c36 -> 0eb7657)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a change to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git.


    omit efb4c36  HBASE-23081 Add an option to enable/disable rs group feature (#691)
    omit 15c28d0  HBASE-22971 Deprecated RSGroupAdminEndpoint and make RSGroup feature always enabled (#595)
    omit 01485b9  HBASE-22987 Calculate the region servers in default group in foreground (#599)
    omit 11a200d  HBASE-22729 Start RSGroupInfoManager as default (#555)
    omit a47d2b5  HBASE-22819 Automatically migrate the rs group config for table after HBASE-22695 (#498)
    omit 14b245f  HBASE-22820 Do not need to persist default rs group now (#482)
    omit b1dd9a8  HBASE-22809 Allow creating table in group when rs group contains no live servers (#464)
    omit 932f8da  HBASE-22695 Store the rsgroup of a table in table configuration (#426)
    omit a3e1b17  HBASE-22676 Move all the code in hbase-rsgroup to hbase-server and remove hbase-rsgroup module (#399)
    omit 51075a1  HBASE-22662 Move RSGroupInfoManager to hbase-server (#368)
    omit 6b9357f  HBASE-22664 Move protobuf stuff in hbase-rsgroup to hbase-protocol-shaded (#362)
     add 95c9911  HBASE-23031 Upgrade Yetus version in RM scripts (#692)
     add 2144690  HBASE-23125 TestRSGroupsAdmin2 is flaky
     add 1366c4d  HBASE-23106 WAL tools doc cleanup; talk of WAL Reader/Verifier; link WALPlayer
     add 10180e2  HBASE-23094 Wrong log message in simpleRegionNormaliser while checking if merge is enabled.
     add 8859651  HBASE-22986 Add documentation for hbtop (#693)
     add d8ea42c  HBASE-23130 Add 2.1.7 to download page (#697)
     add ff52994  HBASE-23095 Reuse FileStatus in StoreFileInfo (#674)
     add e911bb7  HBASE-23134 Enable_all and Disable_all table by Regex fail from Shell (#698)
     add 57439e5  HBASE-23137 [create-release] Add passing of PROJECT variable for when building other than core (#701)
     add 92bf07a  HBASE-22767 System table RIT STUCK if their RSGroup has no highest version RSes Signed-off-by: stack <st...@apache.org>
     add 128b480  HBASE-22887 Fix HFileOutputFormat2 writer roll (#554)
     add 6e3a455  HBASE-23139 MapReduce jobs lauched from convenience distribution are nonfunctional (#705)
     add bb83e42  HBASE-23140 Remove unknown table error (#706)
     add d237106  HBASE-23138 Drop_all table by regex fail (#704)
     add fdac2dd  HBASE-23115 Unit change for StoreFileSize and MemstoreSize (table.jsp) (#682)
     add ba12d5b  HBASE-23123 Merge_region fails from shell (#690)
     add fec4c52  HBASE-23136 PartionedMobFileCompactor bulkloaded files shouldn't get replicated
     add fd9cfd7  HBASE-23114 Use archiveArtifacts in Jenkinsfiles (#681)
     add 16da123  HBASE-23017 Verify the file integrity in persistent IOEngine
     add f0b2212  HBASE-23144 Compact_rs throw wrong number of arguments
     add 2f0b3ac  HBASE-23093 Avoid Optional Anti-Pattern where possible (#673)
     add abc38d8  HBASE-23129 Move core to use hbase-thirdparty-3.1.1 (#695)
     add 38b06c7  Revert "HBASE-23136 PartionedMobFileCompactor bulkloaded files shouldn't get replicated"
     add 473816d  HBASE-23145 Remove out-of-date comments in StoreFlusher.java (#709)
     add 1aee5f0  HBASE-23083 Collect Executor status info periodically and report to (#664)
     add 5848e14  HBASE-23056 Block count is 0 when BucketCache using persistent IOEngine and retrieve from file
     add c0a09cc  HBASE-23152 Compaction_switch does not work by RegionServer name (#713)
     add 0659932  HBASE-23154 list_deadservers return incorrect no of rows (#717)
     add 73d69c6  HBASE-23153 PrimaryRegionCountSkewCostFunction SLB function should implement CostFunction#isNeeded (#714)
     add 9e628b7  HBASE-23159 HStore#getStorefilesSize may throw NPE
     add 6aec958  HBASE-23163 Refactor HStore.getStorefilesSize related methods (#719)
     add 7924ba3  HBASE-23155 May NPE when concurrent AsyncNonMetaRegionLocator#updateCachedLocationOnError (#718)
     add 17495d8  HBASE-20626 Change the value of "Requests Per Second" on WEBUI
     add 395cfce  HBASE-22370 ByteBuf LEAK ERROR (#720)
     add 820a416  HBASE-23177 If fail to open reference because FNFE, make it plain it is a Reference Signed-off-by: Duo Zhang <zh...@apache.org> Signed-off-by: Sean Busbey <bu...@apache.org> Signed-off-by: Viraj Jasani <vi...@gmail.com>
     add 0043dfe  HBASE-23107 Avoid temp byte array creation when doing cacheDataOnWrite (#678)
     add 0f910f0  HBASE-23176 delete_all_snapshot does not work with regex (#725)
     add 946f1e9  HBASE-23170 Admin#getRegionServers use ClusterMetrics.Option.SERVERS_NAME (#721)
     add 4d41402  HBASE-23136 (#712)
     add da9a53e  HBASE-23042 Parameters are incorrect in procedures jsp (#728)
     add af1910c  HBASE-23172 HBase Canary region success count metrics reflect column family successes, not region successes
     add 6e96738  HBASE-23012 Add HBase 1.3.6 to the downloads page (#738)
     add 2ad62b0  HBASE-22679 : Revamping CellUtil (#735)
     add 14dcf1d  HBASE-22460 : Reopen regions with very high Store Ref Counts (#600)
     add 59fc163  HBASE-15519 Add per-user metrics with lossy counting
     add b14ba58  HBASE-23207 Log a region open journal (#751)
     add 42d535a  HBASE-23073 Add an optional costFunction to balance regions according to a capacity rule (#677)
     add 65ee170  HBASE-23012 Addendum - Update 1.3.6 Release date in the downloads page
     add 8f92a14  HBASE-23203 NPE in RSGroup info (#747)
     add d7b90b3  HBASE-23194 Remove unused methods from TokenUtil (#737)
     add 50dc288  HBASE-22991 add HBase 1.4.11 to the downloads page.
     add 7b5cd01  HBASE-23181 Blocked WAL archive: "LogRoller: Failed to schedule flush of XXXX, because it is not online on us" (#753)
     add d7deafa  HBASE-23216 Add 2.2.2 to download page (#758)
     new 096d27e  HBASE-22664 Move protobuf stuff in hbase-rsgroup to hbase-protocol-shaded (#362)
     new 2bf3fd0  HBASE-22662 Move RSGroupInfoManager to hbase-server (#368)
     new ea154c0  HBASE-22676 Move all the code in hbase-rsgroup to hbase-server and remove hbase-rsgroup module (#399)
     new 93d38fa  HBASE-22695 Store the rsgroup of a table in table configuration (#426)
     new 9f4e594  HBASE-22809 Allow creating table in group when rs group contains no live servers (#464)
     new d3441b5  HBASE-22820 Do not need to persist default rs group now (#482)
     new af122d3  HBASE-22819 Automatically migrate the rs group config for table after HBASE-22695 (#498)
     new 6bb0e70  HBASE-22729 Start RSGroupInfoManager as default (#555)
     new 5d4786e  HBASE-22987 Calculate the region servers in default group in foreground (#599)
     new b274144  HBASE-22971 Deprecated RSGroupAdminEndpoint and make RSGroup feature always enabled (#595)
     new 0eb7657  HBASE-23081 Add an option to enable/disable rs group feature (#691)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (efb4c36)
            \
             N -- N -- N   refs/heads/HBASE-22514 (0eb7657)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 11 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 dev-support/Jenkinsfile                            |  24 +-
 dev-support/adhoc_run_tests/Jenkinsfile            |   6 +-
 dev-support/create-release/do-release-docker.sh    |   1 +
 dev-support/create-release/release-util.sh         |   2 +-
 .../flaky-tests/flaky-reporting.Jenkinsfile        |   2 +-
 .../flaky-tests/run-flaky-tests.Jenkinsfile        |   2 +-
 .../hadoop/hbase/AsyncMetaTableAccessor.java       | 117 ++--
 .../org/apache/hadoop/hbase/RegionMetrics.java     |   6 +
 .../apache/hadoop/hbase/RegionMetricsBuilder.java  |  18 +
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  |   7 +
 .../java/org/apache/hadoop/hbase/client/Admin.java |  10 +-
 .../hadoop/hbase/client/AdminOverAsyncAdmin.java   |   6 +
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |   4 +-
 .../hbase/client/AsyncMetaRegionLocator.java       |   3 +-
 .../hbase/client/AsyncNonMetaRegionLocator.java    |  23 +-
 .../hbase/client/AsyncRegionLocatorHelper.java     |   7 +-
 .../hbase/client/AsyncTableRegionLocatorImpl.java  |   3 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  23 +-
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |   8 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/RequestConverter.java    |   5 +-
 hbase-common/pom.xml                               |   4 +
 .../apache/hadoop/hbase/ByteBufferKeyValue.java    |   2 +-
 .../java/org/apache/hadoop/hbase/CellUtil.java     | 723 +--------------------
 .../java/org/apache/hadoop/hbase/HConstants.java   |  10 +
 .../org/apache/hadoop/hbase/PrivateCellUtil.java   |  31 +-
 .../hadoop/hbase/io/ByteArrayOutputStream.java     |   5 +
 .../io/encoding/BufferedDataBlockEncoder.java      |   2 +-
 .../hadoop/hbase/util/ImmutableByteArray.java      |   4 +-
 .../org/apache/hadoop/hbase/util/VersionInfo.java  |   4 +
 hbase-common/src/main/resources/hbase-default.xml  |  29 +
 hbase-common/src/saveVersion.sh                    |   4 +-
 .../java/org/apache/hadoop/hbase/TestCellUtil.java |  32 +-
 .../hbase/client/example/HttpProxyExample.java     |  24 +-
 .../regionserver/MetricsRegionServerSource.java    |   1 +
 .../MetricsRegionServerSourceFactory.java          |  12 +
 .../hbase/regionserver/MetricsRegionWrapper.java   |   6 +
 ...Source.java => MetricsUserAggregateSource.java} |  35 +-
 .../hbase/regionserver/MetricsUserSource.java      |  30 +-
 .../MetricsRegionServerSourceFactoryImpl.java      |  28 +-
 .../regionserver/MetricsRegionSourceImpl.java      |   4 +
 ...pl.java => MetricsUserAggregateSourceImpl.java} |  90 +--
 .../hbase/regionserver/MetricsUserSourceImpl.java  | 207 ++++++
 .../regionserver/TestMetricsRegionSourceImpl.java  |   5 +
 ...rceImpl.java => TestMetricsUserSourceImpl.java} |  48 +-
 hbase-hbtop/README.md                              | 248 -------
 hbase-hbtop/img/adding_filters.gif                 | Bin 2770341 -> 0 bytes
 hbase-hbtop/img/changing_displayed_fields.gif      | Bin 2620167 -> 0 bytes
 hbase-hbtop/img/changing_mode.gif                  | Bin 3341347 -> 0 bytes
 hbase-hbtop/img/changing_order_of_fields.gif       | Bin 2879724 -> 0 bytes
 hbase-hbtop/img/changing_refresh_delay.gif         | Bin 2292628 -> 0 bytes
 hbase-hbtop/img/changing_sort_field.gif            | Bin 3434988 -> 0 bytes
 hbase-hbtop/img/driling_down.gif                   | Bin 4003463 -> 0 bytes
 hbase-hbtop/img/help_screen.gif                    | Bin 930726 -> 0 bytes
 hbase-hbtop/img/scrolling_metric_records.gif       | Bin 6575199 -> 0 bytes
 hbase-hbtop/img/showing_and_clearing_filters.gif   | Bin 1375780 -> 0 bytes
 hbase-hbtop/img/top_screen.gif                     | Bin 2146437 -> 0 bytes
 .../chaos/actions/MoveRegionsOfTableAction.java    |   5 +-
 .../hbase/mapreduce/IntegrationTestBulkLoad.java   |   7 +-
 .../hbase/test/IntegrationTestBigLinkedList.java   |   6 +-
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  14 +-
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java |   1 +
 .../hadoop/hbase/util/MapReduceExtendedCell.java   |   2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    |  23 +-
 .../hadoop/hbase/mapreduce/TestSyncTable.java      |   4 +-
 .../hbase/mapreduce/TestWALRecordReader.java       |  25 +-
 .../src/main/protobuf/BucketCacheEntry.proto       |   1 +
 .../src/main/protobuf/Client.proto                 |   1 +
 .../src/main/protobuf/ClusterStatus.proto          |   6 +
 hbase-protocol-shaded/src/main/protobuf/WAL.proto  |   1 +
 .../src/main/protobuf/ClusterStatus.proto          |   6 +
 .../hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon |  61 +-
 .../hbase/tmpl/master/RegionServerListTmpl.jamon   |  65 +-
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   |  52 +-
 .../apache/hadoop/hbase/ExecutorStatusChore.java   |  86 +++
 .../hbase/client/AsyncClusterConnection.java       |  15 +-
 .../hbase/client/AsyncClusterConnectionImpl.java   |   4 +-
 .../hadoop/hbase/coprocessor/MetaTableMetrics.java |  15 +-
 .../hadoop/hbase/executor/ExecutorService.java     |   8 +
 .../java/org/apache/hadoop/hbase/io/FileLink.java  |   8 +-
 .../java/org/apache/hadoop/hbase/io/HFileLink.java |   3 +
 .../io/asyncfs/FanOutOneBlockAsyncDFSOutput.java   |   4 +
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |  54 +-
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java     |  14 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  23 +
 .../hbase/io/hfile/bucket/BucketProtoUtils.java    |  14 +-
 .../hadoop/hbase/io/hfile/bucket/FileIOEngine.java |  12 +-
 .../hbase/io/hfile/bucket/FileMmapIOEngine.java    |  11 +-
 .../hbase/io/hfile/bucket/PersistentIOEngine.java  | 116 ++++
 .../org/apache/hadoop/hbase/ipc/CallRunner.java    |   1 +
 .../org/apache/hadoop/hbase/master/HMaster.java    |  49 ++
 .../hbase/master/RegionPlacementMaintainer.java    |   6 +-
 .../hadoop/hbase/master/RegionsRecoveryChore.java  | 174 +++++
 .../hbase/master/assignment/AssignmentManager.java |  25 +-
 .../HeterogeneousRegionCountCostFunction.java      | 282 ++++++++
 .../master/balancer/StochasticLoadBalancer.java    |   9 +-
 .../master/normalizer/SimpleRegionNormalizer.java  |   2 +-
 .../procedure/ReopenTableRegionsProcedure.java     |  42 +-
 .../mob/compactions/PartitionedMobCompactor.java   |   4 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  24 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  56 +-
 .../apache/hadoop/hbase/regionserver/HStore.java   |  59 +-
 .../hbase/regionserver/MetricsRegionServer.java    |  28 +-
 .../MetricsRegionServerWrapperImpl.java            | 121 ++--
 .../regionserver/MetricsRegionWrapperImpl.java     |  15 +-
 ...StoresPolicy.java => MetricsUserAggregate.java} |  24 +-
 .../regionserver/MetricsUserAggregateFactory.java  |  70 ++
 .../regionserver/MetricsUserAggregateImpl.java     | 134 ++++
 .../hbase/regionserver/SecureBulkLoadManager.java  |   2 +-
 .../hadoop/hbase/regionserver/StoreFileInfo.java   |  37 +-
 .../hadoop/hbase/regionserver/StoreFlusher.java    |   3 -
 .../hadoop/hbase/regionserver/StoreScanner.java    |   7 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java      |  53 +-
 .../hadoop/hbase/regionserver/wal/AsyncFSWAL.java  |  10 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java      |  10 +-
 .../hadoop/hbase/regionserver/wal/FSWALEntry.java  |  17 +-
 .../regionserver/wal/SequenceIdAccounting.java     |  27 +-
 .../hadoop/hbase/regionserver/wal/WALUtil.java     |  91 ++-
 .../RegionReplicaReplicationEndpoint.java          |   3 +-
 .../replication/regionserver/ReplicationSink.java  |  24 +-
 .../hadoop/hbase/security/token/TokenUtil.java     |  79 +--
 .../hadoop/hbase/snapshot/SnapshotManifestV2.java  |   6 +-
 .../apache/hadoop/hbase/tool/BulkLoadHFiles.java   |  14 +
 .../hadoop/hbase/tool/BulkLoadHFilesTool.java      |  14 +-
 .../org/apache/hadoop/hbase/tool/CanaryTool.java   |  52 +-
 .../apache/hadoop/hbase/util/CompressionTest.java  |  14 +-
 .../apache/hadoop/hbase/util/LossyCounting.java    |  60 +-
 .../org/apache/hadoop/hbase/util/RegionMover.java  |   8 +-
 .../apache/hadoop/hbase/util/RegionSplitter.java   |   6 +-
 .../hadoop/hbase/wal/DisabledWALProvider.java      |  14 +-
 .../main/java/org/apache/hadoop/hbase/wal/WAL.java |  51 +-
 .../resources/hbase-webapps/master/procedures.jsp  |   7 +-
 .../resources/hbase-webapps/master/rsgroup.jsp     |  71 +-
 .../main/resources/hbase-webapps/master/table.jsp  |  56 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |   6 +-
 .../hadoop/hbase/HFilePerformanceEvaluation.java   |  18 +-
 .../hadoop/hbase/TestClientClusterMetrics.java     |   1 +
 .../hadoop/hbase/TestExecutorStatusChore.java      | 102 +++
 .../apache/hadoop/hbase/TestTagRewriteCell.java    |   9 +-
 .../hbase/client/DummyAsyncClusterConnection.java  |   2 +-
 .../org/apache/hadoop/hbase/client/TestAdmin.java  |  12 +
 .../hbase/client/TestAppendFromClientSide.java     |  12 +-
 .../hbase/client/TestAsyncClusterAdminApi.java     |   1 +
 .../client/TestAsyncNonMetaRegionLocator.java      |  10 +
 .../hbase/client/TestAsyncRegionAdminApi.java      |  20 +
 .../hbase/client/TestAsyncRegionAdminApi2.java     |  27 +-
 .../hbase/client/TestAsyncTableAdminApi.java       |  24 +-
 .../hbase/client/TestAsyncTableAdminApi3.java      |   9 +-
 .../hbase/client/TestIncrementsFromClientSide.java |  12 +-
 .../hbase/client/TestResultFromCoprocessor.java    |  11 +-
 .../hadoop/hbase/coprocessor/TestWALObserver.java  |  19 +-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java    | 145 +++--
 .../hadoop/hbase/io/hfile/TestHFileBlock.java      |  26 +-
 .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java |  19 +-
 .../TestHFileInlineToRootChunkConversion.java      |  21 +-
 .../io/hfile/bucket/TestVerifyBucketCacheFile.java | 247 +++++++
 .../apache/hadoop/hbase/ipc/TestCallRunner.java    |  26 +
 .../hadoop/hbase/master/AbstractTestDLS.java       |   5 +-
 .../hbase/master/TestRegionsRecoveryChore.java     | 613 +++++++++++++++++
 ...estStochasticLoadBalancerHeterogeneousCost.java | 275 ++++++++
 ...ochasticLoadBalancerHeterogeneousCostRules.java | 161 +++++
 .../regionserver/MetricsRegionWrapperStub.java     |   5 +
 .../hadoop/hbase/regionserver/TestBulkLoad.java    |  24 +-
 .../regionserver/TestBulkLoadReplication.java      |  92 ++-
 .../hadoop/hbase/regionserver/TestHRegion.java     |  80 ++-
 .../regionserver/TestHRegionReplayEvents.java      |  25 +-
 .../regionserver/TestMetricsTableLatencies.java    |   1 +
 .../regionserver/TestMetricsUserAggregate.java     | 152 +++++
 .../hbase/regionserver/TestStoreFileInfo.java      |  48 +-
 .../hbase/regionserver/TestStoreScanner.java       | 118 +++-
 .../hadoop/hbase/regionserver/TestWALLockup.java   |   2 +-
 .../hbase/regionserver/wal/AbstractTestFSWAL.java  | 109 +++-
 .../regionserver/wal/AbstractTestWALReplay.java    |  18 +-
 .../regionserver/wal/ProtobufLogTestHelper.java    |   2 +-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java     |   2 +-
 .../hbase/regionserver/wal/TestFSWALEntry.java     |  48 +-
 .../hbase/regionserver/wal/TestLogRollAbort.java   |   5 +-
 .../regionserver/wal/TestLogRollingNoCluster.java  |   4 +-
 .../regionserver/wal/TestWALActionsListener.java   |   5 +-
 .../replication/TestReplicationSmallTests.java     |   2 +-
 .../regionserver/TestReplicationSourceManager.java |  16 +-
 .../regionserver/TestWALEntryStream.java           |   8 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java   |  28 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java     |  51 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsKillRS.java   |  54 ++
 .../hbase/snapshot/TestSnapshotStoreFileSize.java  | 124 ++++
 .../tool/TestBulkLoadHFilesSplitRecovery.java      |   3 +-
 .../apache/hadoop/hbase/tool/TestCanaryTool.java   |  41 +-
 .../hadoop/hbase/util/TestLossyCounting.java       |  20 +-
 .../org/apache/hadoop/hbase/wal/FaultyFSLog.java   |   6 +-
 .../hadoop/hbase/wal/TestFSHLogProvider.java       |   4 +-
 .../org/apache/hadoop/hbase/wal/TestSecureWAL.java |   4 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java    |  39 +-
 .../hadoop/hbase/wal/TestWALReaderOnSecureWAL.java |   4 +-
 .../apache/hadoop/hbase/wal/TestWALRootDir.java    |   7 +-
 .../hadoop/hbase/wal/WALPerformanceEvaluation.java |   2 +-
 hbase-shell/src/main/ruby/hbase/admin.rb           |  33 +-
 hbase-shell/src/main/ruby/hbase/quotas.rb          |   1 +
 .../main/ruby/shell/commands/compaction_switch.rb  |   4 +-
 .../ruby/shell/commands/delete_all_snapshot.rb     |   6 +-
 .../ruby/shell/commands/delete_table_snapshots.rb  |   3 +-
 .../main/ruby/shell/commands/describe_namespace.rb |  14 +-
 .../main/ruby/shell/commands/list_deadservers.rb   |   2 +-
 hbase-shell/src/test/ruby/hbase/admin_test.rb      |  57 ++
 .../hadoop/hbase/thrift2/ThriftUtilities.java      |   4 +-
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   |   5 +
 pom.xml                                            |   7 +-
 src/main/asciidoc/_chapters/hbase-default.adoc     |  40 ++
 src/main/asciidoc/_chapters/hbtop.adoc             | 269 ++++++++
 src/main/asciidoc/_chapters/ops_mgt.adoc           |  32 +
 src/main/asciidoc/book.adoc                        |   1 +
 src/site/xdoc/downloads.xml                        |  62 +-
 212 files changed, 5742 insertions(+), 2188 deletions(-)
 copy hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/{MetricsRegionAggregateSource.java => MetricsUserAggregateSource.java} (58%)
 copy hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllStoresPolicy.java => hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java (73%)
 copy hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/{MetricsTableAggregateSourceImpl.java => MetricsUserAggregateSourceImpl.java} (51%)
 create mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java
 copy hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/{TestMetricsTableSourceImpl.java => TestMetricsUserSourceImpl.java} (59%)
 delete mode 100644 hbase-hbtop/README.md
 delete mode 100644 hbase-hbtop/img/adding_filters.gif
 delete mode 100644 hbase-hbtop/img/changing_displayed_fields.gif
 delete mode 100644 hbase-hbtop/img/changing_mode.gif
 delete mode 100644 hbase-hbtop/img/changing_order_of_fields.gif
 delete mode 100644 hbase-hbtop/img/changing_refresh_delay.gif
 delete mode 100644 hbase-hbtop/img/changing_sort_field.gif
 delete mode 100644 hbase-hbtop/img/driling_down.gif
 delete mode 100644 hbase-hbtop/img/help_screen.gif
 delete mode 100644 hbase-hbtop/img/scrolling_metric_records.gif
 delete mode 100644 hbase-hbtop/img/showing_and_clearing_filters.gif
 delete mode 100644 hbase-hbtop/img/top_screen.gif
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java
 copy hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/{FlushAllStoresPolicy.java => MetricsUserAggregate.java} (77%)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateFactory.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateImpl.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestExecutorStatusChore.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserAggregate.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java
 create mode 100644 src/main/asciidoc/_chapters/hbtop.adoc


[hbase] 11/11: HBASE-23081 Add an option to enable/disable rs group feature (#691)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 0eb7657ea0a593f2afe9a35e963f79f85582428c
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Mon Oct 7 17:34:33 2019 +0800

    HBASE-23081 Add an option to enable/disable rs group feature (#691)
    
    Signed-off-by: Peter Somogyi <ps...@apache.org>
---
 .../org/apache/hadoop/hbase/master/HMaster.java    |  12 +++
 .../hbase/rsgroup/DisabledRSGroupInfoManager.java  | 110 +++++++++++++++++++++
 .../hadoop/hbase/rsgroup/RSGroupInfoManager.java   |   8 +-
 .../org/apache/hadoop/hbase/TestNamespace.java     |   3 +-
 .../hadoop/hbase/master/AbstractTestDLS.java       |   4 +-
 .../hadoop/hbase/master/TestClusterRestart.java    |   2 +-
 .../hadoop/hbase/master/TestMasterMetrics.java     |   4 +-
 .../TestMasterRestartAfterDisablingTable.java      |   4 +-
 .../hadoop/hbase/master/TestRollingRestart.java    |   2 +-
 .../hadoop/hbase/regionserver/TestRegionOpen.java  |   1 -
 .../TestRegionReplicasWithRestartScenarios.java    |   2 +-
 .../regionserver/TestRegionServerMetrics.java      |   4 +-
 .../hbase/util/TestHBaseFsckReplication.java       |   2 -
 13 files changed, 140 insertions(+), 18 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 986557b..3a701b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -186,6 +186,7 @@ import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus;
+import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
 import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
 import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -778,6 +779,17 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);
     this.splitOrMergeTracker.start();
 
+    // This is for backwards compatibility. We do not need the CP for rs group now, but if the user
+    // wants to load it, we need to enable rs group.
+    String[] cpClasses = conf.getStrings(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
+    if (cpClasses != null) {
+      for (String cpClass : cpClasses) {
+        if (RSGroupAdminEndpoint.class.getName().equals(cpClass)) {
+          conf.setBoolean(RSGroupInfoManager.RS_GROUP_ENABLED, true);
+          break;
+        }
+      }
+    }
     this.rsGroupInfoManager = RSGroupInfoManager.create(this);
 
     this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);
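
For illustration only (not part of the patch): a minimal sketch, assuming the standard Hadoop Configuration API, of the two ways this toggle can end up enabled. The wrapper class is hypothetical; the constants are the ones referenced in the hunk above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
    import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;

    public class RsGroupToggleSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Explicit path: set the new boolean key directly.
        conf.setBoolean(RSGroupInfoManager.RS_GROUP_ENABLED, true);

        // Backwards-compatible path: if the old RSGroupAdminEndpoint coprocessor is
        // still configured, HMaster flips the same key on at startup (see hunk above).
        conf.set(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
          RSGroupAdminEndpoint.class.getName());
      }
    }
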
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java
new file mode 100644
index 0000000..c7c521c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.net.Address;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A dummy RSGroupInfoManager which only contains a default rs group.
+ */
+@InterfaceAudience.Private
+class DisabledRSGroupInfoManager implements RSGroupInfoManager {
+
+  private final ServerManager serverManager;
+
+  public DisabledRSGroupInfoManager(ServerManager serverManager) {
+    this.serverManager = serverManager;
+  }
+
+  @Override
+  public void start() {
+  }
+
+  @Override
+  public void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException {
+    throw new DoNotRetryIOException("RSGroup is disabled");
+  }
+
+  @Override
+  public void removeRSGroup(String groupName) throws IOException {
+    throw new DoNotRetryIOException("RSGroup is disabled");
+  }
+
+  @Override
+  public Set<Address> moveServers(Set<Address> servers, String srcGroup, String dstGroup)
+    throws IOException {
+    throw new DoNotRetryIOException("RSGroup is disabled");
+  }
+
+  private SortedSet<Address> getOnlineServers() {
+    SortedSet<Address> onlineServers = new TreeSet<Address>();
+    serverManager.getOnlineServers().keySet().stream().map(ServerName::getAddress)
+      .forEach(onlineServers::add);
+    return onlineServers;
+  }
+
+  @Override
+  public RSGroupInfo getRSGroupOfServer(Address serverHostPort) throws IOException {
+    SortedSet<Address> onlineServers = getOnlineServers();
+    if (onlineServers.contains(serverHostPort)) {
+      return new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, onlineServers);
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public RSGroupInfo getRSGroup(String groupName) throws IOException {
+    if (RSGroupInfo.DEFAULT_GROUP.equals(groupName)) {
+      return new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getOnlineServers());
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public List<RSGroupInfo> listRSGroups() throws IOException {
+    return Arrays.asList(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getOnlineServers()));
+  }
+
+  @Override
+  public boolean isOnline() {
+    return true;
+  }
+
+  @Override
+  public void removeServers(Set<Address> servers) throws IOException {
+    throw new DoNotRetryIOException("RSGroup is disabled");
+  }
+
+  @Override
+  public RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException {
+    return null;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index a46fa4b..3f73f78 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -32,6 +32,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public interface RSGroupInfoManager {
 
+  static final String RS_GROUP_ENABLED = "hbase.balancer.rsgroup.enabled";
+
   void start();
 
   /**
@@ -90,6 +92,10 @@ public interface RSGroupInfoManager {
   RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException;
 
   static RSGroupInfoManager create(MasterServices master) throws IOException {
-    return RSGroupInfoManagerImpl.getInstance(master);
+    if (master.getConfiguration().getBoolean(RS_GROUP_ENABLED, false)) {
+      return RSGroupInfoManagerImpl.getInstance(master);
+    } else {
+      return new DisabledRSGroupInfoManager(master.getServerManager());
+    }
   }
 }
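
As a rough, hypothetical sketch (not part of the patch) of the behavior the create() factory above yields when the flag is off: read-only calls answer with a single default group built from the online servers, while mutating calls are rejected. The master reference is assumed to come from a running HMaster or a test harness.

    import java.io.IOException;
    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.master.MasterServices;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;

    public class DisabledRsGroupBehaviorSketch {
      static void show(MasterServices master) throws IOException {
        // With hbase.balancer.rsgroup.enabled left false, create() returns the disabled manager.
        RSGroupInfoManager manager = RSGroupInfoManager.create(master);
        System.out.println(manager.isOnline());                      // always true
        System.out.println(manager.listRSGroups());                  // only the "default" group
        System.out.println(manager.getRSGroup(RSGroupInfo.DEFAULT_GROUP));
        try {
          manager.addRSGroup(new RSGroupInfo("my_group"));           // mutations are rejected
        } catch (DoNotRetryIOException e) {
          System.out.println("rs group feature is disabled: " + e.getMessage());
        }
      }
    }
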
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
index df39862..34ff391 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
@@ -114,8 +114,7 @@ public class TestNamespace {
     assertEquals(2, admin.listNamespaceDescriptors().length);
 
     //verify existence of system tables
-    Set<TableName> systemTables = Sets.newHashSet(
-        TableName.META_TABLE_NAME, TableName.valueOf("hbase:rsgroup"));
+    Set<TableName> systemTables = Sets.newHashSet(TableName.META_TABLE_NAME);
     List<TableDescriptor> descs = admin.listTableDescriptorsByNamespace(
       Bytes.toBytes(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()));
     assertEquals(systemTables.size(), descs.size());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
index fc2e9f7..a576adc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
@@ -504,14 +504,14 @@ public abstract class AbstractTestDLS {
       for (String oregion : regions)
         LOG.debug("Region still online: " + oregion);
     }
-    assertEquals(2 + existingRegions, regions.size());
+    assertEquals(1 + existingRegions, regions.size());
     LOG.debug("Enabling table\n");
     TEST_UTIL.getAdmin().enableTable(tableName);
     LOG.debug("Waiting for no more RIT\n");
     blockUntilNoRIT();
     LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n");
     regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
-    assertEquals(numRegions + 2 + existingRegions, regions.size());
+    assertEquals(numRegions + 1 + existingRegions, regions.size());
     return table;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java
index a8d80b6..e880f97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java
@@ -43,7 +43,7 @@ public class TestClusterRestart extends AbstractTestRestartCluster {
 
   private static final Logger LOG = LoggerFactory.getLogger(TestClusterRestart.class);
 
-  private static final int NUM_REGIONS = 4;
+  private static final int NUM_REGIONS = 3;
 
   @Override
   protected boolean splitWALCoordinatedByZk() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
index 61720df..75d9ee1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -91,7 +90,6 @@ public class TestMasterMetrics {
     cluster = TEST_UTIL.getHBaseCluster();
     LOG.info("Waiting for active/ready master");
     cluster.waitForActiveAndReadyMaster();
-    TEST_UTIL.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
     master = cluster.getMaster();
   }
 
@@ -133,7 +131,7 @@ public class TestMasterMetrics {
     MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
     boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
     metricsHelper.assertGauge("numRegionServers", 1 + (tablesOnMaster ? 1 : 0), masterSource);
-    metricsHelper.assertGauge("averageLoad", 2, masterSource);
+    metricsHelper.assertGauge("averageLoad", 1, masterSource);
     metricsHelper.assertGauge("numDeadRegionServers", 0, masterSource);
 
     metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), masterSource);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
index 2d0af3e..fca2866 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
@@ -92,7 +92,7 @@ public class TestMasterRestartAfterDisablingTable {
 
     NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
     assertEquals("The number of regions for the table tableRestart should be 0 and only" +
-      "the catalog table should be present.", 2, regions.size());
+      "the catalog table should be present.", 1, regions.size());
 
     List<MasterThread> masterThreads = cluster.getMasterThreads();
     MasterThread activeMaster = null;
@@ -120,7 +120,7 @@ public class TestMasterRestartAfterDisablingTable {
     log("Verifying there are " + numRegions + " assigned on cluster\n");
     regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
     assertEquals("The assigned regions were not onlined after master" +
-      " switch except for the catalog table.", 6, regions.size());
+      " switch except for the catalog table.", 5, regions.size());
     assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager()
       .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
     ht.close();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
index 2dc1b6c..0aba487 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
@@ -121,7 +121,7 @@ public class TestRollingRestart {
         log("Region still online: " + oregion);
       }
     }
-    assertEquals(2, regions.size());
+    assertEquals(1, regions.size());
     log("Enabling table\n");
     TEST_UTIL.getAdmin().enableTable(tableName);
     log("Waiting for no more RIT\n");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
index 2503825..7d52508 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
@@ -86,7 +86,6 @@ public class TestRegionOpen {
     final TableName tableName = TableName.valueOf(TestRegionOpen.class.getSimpleName());
     ThreadPoolExecutor exec = getRS().getExecutorService()
         .getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION);
-    HTU.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
     long completed = exec.getCompletedTaskCount();
 
     HTableDescriptor htd = new HTableDescriptor(tableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
index dc0a918..70df61e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
@@ -146,7 +146,7 @@ public class TestRegionReplicasWithRestartScenarios {
     checkDuplicates(onlineRegions3);
     assertFalse(res);
     int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-    assertEquals(62, totalRegions);
+    assertEquals(61, totalRegions);
   }
 
   private boolean checkDuplicates(Collection<HRegion> onlineRegions3) throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index ab7ff16..b57ff41 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -225,7 +225,7 @@ public class TestRegionServerMetrics {
 
   @Test
   public void testRegionCount() throws Exception {
-    metricsHelper.assertGauge("regionCount", TABLES_ON_MASTER ? 1 : 3, serverSource);
+    metricsHelper.assertGauge("regionCount", TABLES_ON_MASTER ? 1 : 2, serverSource);
   }
 
   @Test
@@ -335,7 +335,7 @@ public class TestRegionServerMetrics {
     TEST_UTIL.getAdmin().flush(tableName);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    assertGauge("storeCount", TABLES_ON_MASTER ? 1 : 6);
+    assertGauge("storeCount", TABLES_ON_MASTER ? 1 : 5);
     assertGauge("storeFileCount", 1);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 0ac07d8..316a321 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -24,7 +24,6 @@ import java.util.stream.Stream;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@@ -53,7 +52,6 @@ public class TestHBaseFsckReplication {
   public static void setUp() throws Exception {
     UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
     UTIL.startMiniCluster(1);
-    UTIL.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
   }
 
   @AfterClass


[hbase] 02/11: HBASE-22662 Move RSGroupInfoManager to hbase-server (#368)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 2bf3fd027b3996ffa8780dbbff643ddea8f6ac90
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Thu Jul 11 10:34:05 2019 +0800

    HBASE-22662 Move RSGroupInfoManager to hbase-server (#368)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 hbase-rsgroup/pom.xml                              |   4 -
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java    |   4 +-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java     |   6 +-
 .../hbase/rsgroup/VerifyingRSGroupAdminClient.java |   6 +-
 .../hadoop/hbase/rsgroup/RSGroupInfoManager.java   |  26 +----
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      | 108 +++++++++++----------
 6 files changed, 71 insertions(+), 83 deletions(-)

diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml
index 09c8e73..1135939 100644
--- a/hbase-rsgroup/pom.xml
+++ b/hbase-rsgroup/pom.xml
@@ -113,10 +113,6 @@
       <artifactId>hbase-shaded-miscellaneous</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
     </dependency>
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index 6767ac9..7394f55 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -121,8 +121,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   public List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)
       throws HBaseIOException {
     if (!isOnline()) {
-      throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME +
-          " is not online, unable to perform balance");
+      throw new ConstraintException(
+          RSGroupInfoManager.class.getSimpleName() + " is not online, unable to perform balance");
     }
 
     // Calculate correct assignments and a list of RegionPlan for mis-placed regions
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
index 39cf164..60887e4 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
@@ -140,7 +140,7 @@ public class TestRSGroupsOfflineMode {
       }
     });
     // Move table to group and wait.
-    groupAdmin.moveTables(Sets.newHashSet(RSGroupInfoManager.RSGROUP_TABLE_NAME), newGroup);
+    groupAdmin.moveTables(Sets.newHashSet(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME), newGroup);
     LOG.info("Waiting for move table...");
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
@@ -169,7 +169,7 @@ public class TestRSGroupsOfflineMode {
     // Make sure balancer is in offline mode, since this is what we're testing.
     assertFalse(groupMgr.isOnline());
     // Verify the group affiliation that's loaded from ZK instead of tables.
-    assertEquals(newGroup, groupMgr.getRSGroupOfTable(RSGroupInfoManager.RSGROUP_TABLE_NAME));
+    assertEquals(newGroup, groupMgr.getRSGroupOfTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME));
     assertEquals(RSGroupInfo.DEFAULT_GROUP, groupMgr.getRSGroupOfTable(failoverTable));
     // Kill final regionserver to see the failover happens for all tables except GROUP table since
     // it's group does not have any online RS.
@@ -182,7 +182,7 @@ public class TestRSGroupsOfflineMode {
         return failoverRS.getRegions(failoverTable).size() >= 1;
       }
     });
-    Assert.assertEquals(0, failoverRS.getRegions(RSGroupInfoManager.RSGROUP_TABLE_NAME).size());
+    Assert.assertEquals(0, failoverRS.getRegions(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME).size());
 
     // Need this for minicluster to shutdown cleanly.
     master.stopMaster();
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index 2ad30e4..fcaf1a7 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -52,7 +52,7 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
       throws IOException {
     wrapped = RSGroupAdmin;
     table = ConnectionFactory.createConnection(conf)
-            .getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME);
+        .getTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME);
     zkw = new ZKWatcher(conf, this.getClass().getSimpleName(), null);
   }
 
@@ -126,8 +126,8 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
       RSGroupProtos.RSGroupInfo proto =
           RSGroupProtos.RSGroupInfo.parseFrom(
               result.getValue(
-                  RSGroupInfoManager.META_FAMILY_BYTES,
-                  RSGroupInfoManager.META_QUALIFIER_BYTES));
+                  RSGroupInfoManagerImpl.META_FAMILY_BYTES,
+                  RSGroupInfoManagerImpl.META_QUALIFIER_BYTES));
       groupMap.put(proto.getName(), ProtobufUtil.toGroupInfo(proto));
     }
     Assert.assertEquals(Sets.newHashSet(groupMap.values()),
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
similarity index 82%
rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 398e8a4..70aeabf 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -15,38 +15,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.rsgroup;
 
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
-
-import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * Interface used to manage RSGroupInfo storage. An implementation
- * has the option to support offline mode.
- * See {@link RSGroupBasedLoadBalancer}
+ * Interface used to manage RSGroupInfo storage. An implementation has the option to support offline
+ * mode. See {@code RSGroupBasedLoadBalancer}.
  */
 @InterfaceAudience.Private
 public interface RSGroupInfoManager {
 
-  String REASSIGN_WAIT_INTERVAL_KEY = "hbase.rsgroup.reassign.wait";
-  long DEFAULT_REASSIGN_WAIT_INTERVAL = 30 * 1000L;
-
-  //Assigned before user tables
-  TableName RSGROUP_TABLE_NAME =
-      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup");
-  String rsGroupZNode = "rsgroup";
-  byte[] META_FAMILY_BYTES = Bytes.toBytes("m");
-  byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i");
-  byte[] ROW_KEY = {0};
-
   void start();
 
   /**
@@ -86,7 +70,6 @@ public interface RSGroupInfoManager {
 
   /**
    * Set the group membership of a set of tables
-   *
    * @param tableNames set of tables to move
    * @param groupName name of group of tables to move to
    */
@@ -104,7 +87,6 @@ public interface RSGroupInfoManager {
 
   /**
    * Whether the manager is able to fully return group metadata
-   *
    * @return whether the manager is in online mode
    */
   boolean isOnline();
@@ -116,8 +98,8 @@ public interface RSGroupInfoManager {
    * @param srcGroup groupName being moved from
    * @param dstGroup groupName being moved to
    */
-  void moveServersAndTables(Set<Address> servers, Set<TableName> tables,
-      String srcGroup, String dstGroup) throws IOException;
+  void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String srcGroup,
+      String dstGroup) throws IOException;
 
   /**
    * Remove decommissioned servers from rsgroup
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
similarity index 90%
rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index b54f088..8aa7520 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.rsgroup;
 
-import com.google.protobuf.ServiceException;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -34,10 +33,12 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncTable;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -45,14 +46,11 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerListener;
 import org.apache.hadoop.hbase.master.TableStateManager;
@@ -62,10 +60,14 @@ import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -75,6 +77,7 @@ import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -87,13 +90,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
  * RSGroupInfo Map at {@link #rsGroupMap} and a Map of tables to the name of the rsgroup they belong
  * too (in {@link #tableMap}). These Maps are persisted to the hbase:rsgroup table (and cached in
  * zk) on each modification.
- * <p>
+ * <p/>
  * Mutations on state are synchronized but reads can continue without having to wait on an instance
  * monitor, mutations do wholesale replace of the Maps on update -- Copy-On-Write; the local Maps of
  * state are read-only, just-in-case (see flushConfig).
- * <p>
+ * <p/>
  * Reads must not block else there is a danger we'll deadlock.
- * <p>
+ * <p/>
  * Clients of this class, the {@link RSGroupAdminEndpoint} for example, want to query and then act
  * on the results of the query modifying cache in zookeeper without another thread making
  * intermediate modifications. These clients synchronize on the 'this' instance so no other has
@@ -103,6 +106,24 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   private static final Logger LOG = LoggerFactory.getLogger(RSGroupInfoManagerImpl.class);
 
+  private static final String REASSIGN_WAIT_INTERVAL_KEY = "hbase.rsgroup.reassign.wait";
+  private static final long DEFAULT_REASSIGN_WAIT_INTERVAL = 30 * 1000L;
+
+  // Assigned before user tables
+  @VisibleForTesting
+  static final TableName RSGROUP_TABLE_NAME =
+      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup");
+
+  private static final String RS_GROUP_ZNODE = "rsgroup";
+
+  @VisibleForTesting
+  static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m");
+
+  @VisibleForTesting
+  static final byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i");
+
+  private static final byte[] ROW_KEY = { 0 };
+
   /** Table descriptor for <code>hbase:rsgroup</code> catalog table */
   private static final TableDescriptor RSGROUP_TABLE_DESC;
   static {
@@ -125,7 +146,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   private volatile Map<TableName, String> tableMap = Collections.emptyMap();
 
   private final MasterServices masterServices;
-  private final Connection conn;
+  private final AsyncClusterConnection conn;
   private final ZKWatcher watcher;
   private final RSGroupStartupWorker rsGroupStartupWorker;
   // contains list of groups that were last flushed to persistent store
@@ -136,7 +157,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   private RSGroupInfoManagerImpl(MasterServices masterServices) throws IOException {
     this.masterServices = masterServices;
     this.watcher = masterServices.getZooKeeper();
-    this.conn = masterServices.getConnection();
+    this.conn = masterServices.getAsyncClusterConnection();
     this.rsGroupStartupWorker = new RSGroupStartupWorker();
   }
 
@@ -349,25 +370,25 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     }
   }
 
-  List<RSGroupInfo> retrieveGroupListFromGroupTable() throws IOException {
+  private List<RSGroupInfo> retrieveGroupListFromGroupTable() throws IOException {
     List<RSGroupInfo> rsGroupInfoList = Lists.newArrayList();
-    try (Table table = conn.getTable(RSGROUP_TABLE_NAME);
-        ResultScanner scanner = table.getScanner(new Scan())) {
+    AsyncTable<?> table = conn.getTable(RSGROUP_TABLE_NAME);
+    try (ResultScanner scanner = table.getScanner(META_FAMILY_BYTES, META_QUALIFIER_BYTES)) {
       for (Result result;;) {
         result = scanner.next();
         if (result == null) {
           break;
         }
         RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo
-          .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES));
+            .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES));
         rsGroupInfoList.add(ProtobufUtil.toGroupInfo(proto));
       }
     }
     return rsGroupInfoList;
   }
 
-  List<RSGroupInfo> retrieveGroupListFromZookeeper() throws IOException {
-    String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, rsGroupZNode);
+  private List<RSGroupInfo> retrieveGroupListFromZookeeper() throws IOException {
+    String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE);
     List<RSGroupInfo> RSGroupInfoList = Lists.newArrayList();
     // Overwrite any info stored by table, this takes precedence
     try {
@@ -519,7 +540,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     resetRSGroupAndTableMaps(newGroupMap, newTableMap);
 
     try {
-      String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, rsGroupZNode);
+      String groupBasePath =
+          ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE);
       ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC);
 
       List<ZKUtil.ZKUtilOp> zkOps = new ArrayList<>(newGroupMap.size());
@@ -702,11 +724,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
             createRSGroupTable();
           }
           // try reading from the table
-          try (Table table = conn.getTable(RSGROUP_TABLE_NAME)) {
-            table.get(new Get(ROW_KEY));
-          }
-          LOG.info(
-            "RSGroup table=" + RSGROUP_TABLE_NAME + " is online, refreshing cached information");
+          FutureUtils.get(conn.getTable(RSGROUP_TABLE_NAME).get(new Get(ROW_KEY)));
+          LOG.info("RSGroup table={} is online, refreshing cached information", RSGROUP_TABLE_NAME);
           RSGroupInfoManagerImpl.this.refresh(true);
           online = true;
           // flush any inconsistencies between ZK and HTable
@@ -748,8 +767,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       } else {
         Procedure<?> result = masterServices.getMasterProcedureExecutor().getResult(procId);
         if (result != null && result.isFailed()) {
-          throw new IOException(
-            "Failed to create group table. " + MasterProcedureUtil.unwrapRemoteIOException(result));
+          throw new IOException("Failed to create group table. " +
+              MasterProcedureUtil.unwrapRemoteIOException(result));
         }
       }
     }
@@ -764,33 +783,24 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   }
 
   private void multiMutate(List<Mutation> mutations) throws IOException {
-    try (Table table = conn.getTable(RSGROUP_TABLE_NAME)) {
-      CoprocessorRpcChannel channel = table.coprocessorService(ROW_KEY);
-      MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder =
-        MultiRowMutationProtos.MutateRowsRequest.newBuilder();
-      for (Mutation mutation : mutations) {
-        if (mutation instanceof Put) {
-          mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(
-            org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.PUT,
-            mutation));
-        } else if (mutation instanceof Delete) {
-          mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(
-            org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.DELETE,
-            mutation));
-        } else {
-          throw new DoNotRetryIOException(
+    MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
+    for (Mutation mutation : mutations) {
+      if (mutation instanceof Put) {
+        builder
+            .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT, mutation));
+      } else if (mutation instanceof Delete) {
+        builder.addMutationRequest(
+          ProtobufUtil.toMutation(MutationProto.MutationType.DELETE, mutation));
+      } else {
+        throw new DoNotRetryIOException(
             "multiMutate doesn't support " + mutation.getClass().getName());
-        }
-      }
-
-      MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
-        MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
-      try {
-        service.mutateRows(null, mmrBuilder.build());
-      } catch (ServiceException ex) {
-        ProtobufUtil.toIOException(ex);
       }
     }
+    MutateRowsRequest request = builder.build();
+    AsyncTable<?> table = conn.getTable(RSGROUP_TABLE_NAME);
+    FutureUtils.get(table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
+      MultiRowMutationService::newStub,
+      (stub, controller, done) -> stub.mutateRows(controller, request, done), ROW_KEY));
   }
 
   private void checkGroupName(String groupName) throws ConstraintException {
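
For readers following the change, the rewritten multiMutate() above replaces the blocking
Table/CoprocessorRpcChannel path with the AsyncTable coprocessor service. A minimal sketch of the
resulting call pattern, assuming the fields and imports from the hunk above (conn, ROW_KEY,
RSGROUP_TABLE_NAME, the generated MultiRowMutation classes) and a placeholder Put named put:

    // Build the batched request; Puts and Deletes become MutationProto entries.
    MutateRowsRequest request = MutateRowsRequest.newBuilder()
        .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT, put))
        .build();
    // coprocessorService(stubMaker, callable, row) returns a CompletableFuture;
    // FutureUtils.get() blocks the caller and rethrows any failure as an IOException.
    AsyncTable<?> table = conn.getTable(RSGROUP_TABLE_NAME);
    FutureUtils.get(table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
        MultiRowMutationService::newStub,
        (stub, controller, done) -> stub.mutateRows(controller, request, done), ROW_KEY));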


[hbase] 10/11: HBASE-22971 Deprecated RSGroupAdminEndpoint and make RSGroup feature always enabled (#595)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit b274144d11b7455d650ebf3e5d51fee1d4599cd9
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Sep 24 12:09:12 2019 +0800

    HBASE-22971 Deprecated RSGroupAdminEndpoint and make RSGroup feature always enabled (#595)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../hbase/favored/FavoredNodeLoadBalancer.java     |   1 +
 .../hadoop/hbase/favored/FavoredNodesPromoter.java |   2 +
 .../org/apache/hadoop/hbase/master/HMaster.java    |  43 +++----
 .../apache/hadoop/hbase/master/LoadBalancer.java   |   9 ++
 .../hadoop/hbase/master/MasterRpcServices.java     |  23 ++--
 .../apache/hadoop/hbase/master/MasterServices.java |   2 +-
 .../hbase/master/assignment/AssignmentManager.java |  15 ++-
 .../master/balancer/FavoredStochasticBalancer.java |   1 +
 .../hbase/master/balancer/LoadBalancerFactory.java |  18 ++-
 .../AbstractStateMachineNamespaceProcedure.java    |  11 ++
 .../master/procedure/CreateNamespaceProcedure.java |   1 +
 .../master/procedure/CreateTableProcedure.java     |  27 +++-
 .../master/procedure/MasterProcedureUtil.java      |  50 +++++++-
 .../master/procedure/ModifyNamespaceProcedure.java |  19 ++-
 .../master/procedure/ModifyTableProcedure.java     |  15 ++-
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 141 +--------------------
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java    |  65 ++++++----
 .../hadoop/hbase/rsgroup/RSGroupableBalancer.java  |  32 -----
 .../org/apache/hadoop/hbase/TestNamespace.java     |   2 +-
 .../hadoop/hbase/master/AbstractTestDLS.java       |   4 +-
 .../hbase/master/MockNoopMasterServices.java       |   2 +-
 .../hadoop/hbase/master/TestClusterRestart.java    |   6 +-
 .../hadoop/hbase/master/TestMasterMetrics.java     |   4 +-
 .../TestMasterRestartAfterDisablingTable.java      |   4 +-
 .../hadoop/hbase/master/TestRollingRestart.java    |   2 +-
 .../TestFavoredStochasticBalancerPickers.java      |   2 +-
 .../hadoop/hbase/regionserver/TestRegionOpen.java  |   2 +-
 .../TestRegionReplicasWithRestartScenarios.java    |   2 +-
 .../regionserver/TestRegionServerMetrics.java      |   4 +-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java     |   3 +-
 .../security/access/TestTablePermissions.java      |   3 +-
 .../hbase/util/TestHBaseFsckReplication.java       |   2 +
 32 files changed, 241 insertions(+), 276 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
index b8d4b09..52a37a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
@@ -321,6 +321,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
     regionsOnServer.add(region);
   }
 
+  @Override
   public synchronized List<ServerName> getFavoredNodes(RegionInfo regionInfo) {
     return this.fnm.getFavoredNodes(regionInfo);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java
index 322eb1df..a24fce0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java
@@ -35,4 +35,6 @@ public interface FavoredNodesPromoter {
 
   void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents)
       throws IOException;
+
+  List<ServerName> getFavoredNodes(RegionInfo regionInfo);
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 8282f12..986557b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -91,7 +91,6 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
 import org.apache.hadoop.hbase.http.InfoServer;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -107,7 +106,6 @@ import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
 import org.apache.hadoop.hbase.master.balancer.BalancerChore;
-import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
@@ -188,6 +186,7 @@ import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus;
+import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
 import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.SecurityConstants;
@@ -385,7 +384,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   private final LockManager lockManager = new LockManager(this);
 
-  private LoadBalancer balancer;
+  private RSGroupBasedLoadBalancer balancer;
   private RegionNormalizer normalizer;
   private BalancerChore balancerChore;
   private RegionNormalizerChore normalizerChore;
@@ -441,9 +440,6 @@ public class HMaster extends HRegionServer implements MasterServices {
   private long splitPlanCount;
   private long mergePlanCount;
 
-  /* Handle favored nodes information */
-  private FavoredNodesManager favoredNodesManager;
-
   /** jetty server for master to redirect requests to regionserver infoServer */
   private Server masterJettyServer;
 
@@ -768,7 +764,8 @@ public class HMaster extends HRegionServer implements MasterServices {
   @VisibleForTesting
   protected void initializeZKBasedSystemTrackers()
       throws IOException, InterruptedException, KeeperException, ReplicationException {
-    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
+    this.balancer = new RSGroupBasedLoadBalancer();
+    this.balancer.setConf(conf);
     this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
     this.normalizer.setMasterServices(this);
     this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);
@@ -1052,9 +1049,6 @@ public class HMaster extends HRegionServer implements MasterServices {
         return temp;
       });
     }
-    if (this.balancer instanceof FavoredNodesPromoter) {
-      favoredNodesManager = new FavoredNodesManager(this);
-    }
 
     // initialize load balancer
     this.balancer.setMasterServices(this);
@@ -1104,11 +1098,11 @@ public class HMaster extends HRegionServer implements MasterServices {
     // table states messing up master launch (namespace table, etc., are not assigned).
     this.assignmentManager.processOfflineRegions();
     // Initialize after meta is up as below scans meta
-    if (favoredNodesManager != null && !maintenanceMode) {
+    if (getFavoredNodesManager() != null && !maintenanceMode) {
       SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
           new SnapshotOfRegionAssignmentFromMeta(getConnection());
       snapshotOfRegionAssignment.initialize();
-      favoredNodesManager.initialize(snapshotOfRegionAssignment);
+      getFavoredNodesManager().initialize(snapshotOfRegionAssignment);
     }
 
     // set cluster status again after user regions are assigned
@@ -2055,14 +2049,13 @@ public class HMaster extends HRegionServer implements MasterServices {
         LOG.debug("Unable to determine a plan to assign " + hri);
         return;
       }
-      // TODO: What is this? I don't get it.
-      if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
-          && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
+      // TODO: deal with table on master for rs group.
+      if (dest.equals(serverName)) {
         // To avoid unnecessary region moving later by balancer. Don't put user
         // regions on master.
-        LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
-          + " to avoid unnecessary region moving later by load balancer,"
-          + " because it should not be on master");
+        LOG.debug("Skipping move of region " + hri.getRegionNameAsString() +
+          " to avoid unnecessary region moving later by load balancer," +
+          " because it should not be on master");
         return;
       }
     }
@@ -3514,12 +3507,14 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   /**
    * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
-   *
+   * <p/>
+   * Notice that the base load balancer will always be {@link RSGroupBasedLoadBalancer} now, so
+   * this method will return the balancer used inside each rs group.
    * @return The name of the {@link LoadBalancer} in use.
    */
   public String getLoadBalancerClassName() {
-    return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, LoadBalancerFactory
-        .getDefaultLoadBalancerClass().getName());
+    return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
+      LoadBalancerFactory.getDefaultLoadBalancerClass().getName());
   }
 
   /**
@@ -3534,13 +3529,13 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public LoadBalancer getLoadBalancer() {
+  public RSGroupBasedLoadBalancer getLoadBalancer() {
     return balancer;
   }
 
   @Override
   public FavoredNodesManager getFavoredNodesManager() {
-    return favoredNodesManager;
+    return balancer.getFavoredNodesManager();
   }
 
   private long executePeerProcedure(AbstractPeerProcedure<?> procedure) throws IOException {
@@ -3858,7 +3853,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public RSGroupInfoManager getRSRSGroupInfoManager() {
+  public RSGroupInfoManager getRSGroupInfoManager() {
     return rsGroupInfoManager;
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 0fc544a..d5ca1f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -65,6 +65,15 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse
   ServerName BOGUS_SERVER_NAME = ServerName.valueOf("localhost,1,1");
 
   /**
+   * Config for pluggable load balancers.
+   * @deprecated since 3.0.0, will be removed in 4.0.0. In the new implementation, as the base load
+   *             balancer will always be the rs group based one, you should just use
+   *             {@link org.apache.hadoop.hbase.HConstants#HBASE_MASTER_LOADBALANCER_CLASS} to
+   *             configure the per group load balancer.
+   */
+  @Deprecated
+  String HBASE_RSGROUP_LOADBALANCER_CLASS = "hbase.rsgroup.grouploadbalancer.class";
+  /**
    * Set the current cluster status. This allows a LoadBalancer to map host name to a server
    */
   void setClusterMetrics(ClusterMetrics st);
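
As the deprecation note above says, the rs group wrapper now reads the standard balancer key rather
than the old rsgroup-specific one. A hedged configuration sketch; the StochasticLoadBalancer value
is only illustrative (it is the usual default from LoadBalancerFactory anyway), and the usual
HBaseConfiguration/HConstants imports are assumed:

    Configuration conf = HBaseConfiguration.create();
    // Deprecated, kept only for compatibility:
    //   hbase.rsgroup.grouploadbalancer.class
    // New style: the balancer used inside each rs group comes from the general key.
    conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
        "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer");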
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 06a99fa..9c16c28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
 import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.LockType;
 import org.apache.hadoop.hbase.procedure2.LockedResource;
@@ -219,10 +220,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormaliz
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
-    .IsSnapshotCleanupEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
-    .IsSnapshotCleanupEnabledResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
@@ -275,10 +274,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormali
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
-    .SetSnapshotCleanupRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
-    .SetSnapshotCleanupResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
@@ -2380,12 +2377,18 @@ public class MasterRpcServices extends RSRpcServices
         LOG.debug("Some dead server is still under processing, won't clear the dead server list");
         response.addAllServerName(request.getServerNameList());
       } else {
+        DeadServer deadServer = master.getServerManager().getDeadServers();
+        Set<Address> clearedServers = new HashSet<>();
         for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
-          if (!master.getServerManager().getDeadServers()
-                  .removeDeadServer(ProtobufUtil.toServerName(pbServer))) {
+          ServerName server = ProtobufUtil.toServerName(pbServer);
+          if (!deadServer.removeDeadServer(server)) {
             response.addServerName(pbServer);
+          } else {
+            clearedServers.add(server.getAddress());
           }
         }
+        master.getRSGroupInfoManager().removeServers(clearedServers);
+        LOG.info("Remove decommissioned servers {} from RSGroup done", clearedServers);
       }
 
       if (master.cpHost != null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index ead40b9..ae55352 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -539,5 +539,5 @@ public interface MasterServices extends Server {
   /**
    * @return the {@link RSGroupInfoManager}
    */
-  RSGroupInfoManager getRSRSGroupInfoManager();
+  RSGroupInfoManager getRSGroupInfoManager();
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index debdf68..0f383bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.regionserver.SequenceId;
+import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -325,6 +326,11 @@ public class AssignmentManager {
     return master.getLoadBalancer();
   }
 
+  private FavoredNodesPromoter getFavoredNodePromoter() {
+    return (FavoredNodesPromoter) ((RSGroupBasedLoadBalancer) master.getLoadBalancer())
+      .getInternalBalancer();
+  }
+
   private MasterProcedureEnv getProcedureEnvironment() {
     return master.getMasterProcedureExecutor().getEnvironment();
   }
@@ -368,7 +374,7 @@ public class AssignmentManager {
 
   public List<ServerName> getFavoredNodes(final RegionInfo regionInfo) {
     return this.shouldAssignRegionsWithFavoredNodes
-      ? ((FavoredStochasticBalancer) getBalancer()).getFavoredNodes(regionInfo)
+      ? getFavoredNodePromoter().getFavoredNodes(regionInfo)
       : ServerName.EMPTY_SERVER_LIST;
   }
 
@@ -1774,8 +1780,8 @@ public class AssignmentManager {
     regionStateStore.splitRegion(parent, daughterA, daughterB, serverName);
     if (shouldAssignFavoredNodes(parent)) {
       List<ServerName> onlineServers = this.master.getServerManager().getOnlineServersList();
-      ((FavoredNodesPromoter)getBalancer()).
-          generateFavoredNodesForDaughter(onlineServers, parent, daughterA, daughterB);
+      getFavoredNodePromoter().generateFavoredNodesForDaughter(onlineServers, parent, daughterA,
+        daughterB);
     }
   }
 
@@ -1800,8 +1806,7 @@ public class AssignmentManager {
     }
     regionStateStore.mergeRegions(child, mergeParents, serverName);
     if (shouldAssignFavoredNodes(child)) {
-      ((FavoredNodesPromoter)getBalancer()).
-        generateFavoredNodesForMergedRegion(child, mergeParents);
+      getFavoredNodePromoter().generateFavoredNodesForMergedRegion(child, mergeParents);
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
index 1daa9e7..442dc44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
@@ -473,6 +473,7 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
     }
   }
 
+  @Override
   public synchronized List<ServerName> getFavoredNodes(RegionInfo regionInfo) {
     return this.fnm.getFavoredNodes(regionInfo);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java
index 9ce020b..bfda12e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java
@@ -17,11 +17,11 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.master.LoadBalancer;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * The class that creates a load balancer from a conf.
@@ -30,8 +30,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 public class LoadBalancerFactory {
 
   /**
-   * The default {@link LoadBalancer} class. 
-   *
+   * The default {@link LoadBalancer} class.
    * @return The Class for the default {@link LoadBalancer}.
    */
   public static Class<? extends LoadBalancer> getDefaultLoadBalancerClass() {
@@ -40,16 +39,15 @@ public class LoadBalancerFactory {
 
   /**
    * Create a loadbalancer from the given conf.
-   * @param conf
    * @return A {@link LoadBalancer}
    */
   public static LoadBalancer getLoadBalancer(Configuration conf) {
-
     // Create the balancer
     Class<? extends LoadBalancer> balancerKlass =
-        conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(),
-          LoadBalancer.class);
-    return ReflectionUtils.newInstance(balancerKlass, conf);
-
+      conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(),
+        LoadBalancer.class);
+    LoadBalancer balancer = ReflectionUtils.newInstance(balancerKlass);
+    balancer.setConf(conf);
+    return balancer;
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
index e751034..5c35544 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
+import java.util.function.Supplier;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -28,6 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
@@ -122,4 +124,13 @@ public abstract class AbstractStateMachineNamespaceProcedure<TState>
   protected void releaseSyncLatch() {
     ProcedurePrepareLatch.releaseLatch(syncLatch, this);
   }
+
+  protected final void checkNamespaceRSGroup(MasterProcedureEnv env, NamespaceDescriptor nd)
+    throws IOException {
+    Supplier<String> forWhom = () -> "namespace " + nd.getName();
+    RSGroupInfo rsGroupInfo = MasterProcedureUtil.checkGroupExists(
+      env.getMasterServices().getRSGroupInfoManager()::getRSGroup,
+      MasterProcedureUtil.getNamespaceGroup(nd), forWhom);
+    MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
index 28f7585..bc60360 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -188,6 +188,7 @@ public class CreateNamespaceProcedure
       return false;
     }
     getTableNamespaceManager(env).validateTableAndRegionCount(nsDescriptor);
+    checkNamespaceRSGroup(env, nsDescriptor);
     return true;
   }
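
The checkNamespaceRSGroup() call added above means a create-namespace request now fails fast when
its rs group is missing or empty. A small client-side sketch, assuming an existing Admin handle
named admin and an illustrative group name appGroup:

    NamespaceDescriptor nd = NamespaceDescriptor.create("app_ns")
        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "appGroup")
        .build();
    // The procedure rejects this with a ConstraintException if appGroup does not
    // exist or currently has no servers (unless the group is the default one).
    admin.createNamespace(nd);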
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 34fde27..5cdfede 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -21,11 +21,12 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-
+import java.util.function.Supplier;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -42,8 +44,10 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -254,10 +258,27 @@ public class CreateTableProcedure
 
     // check that we have at least 1 CF
     if (tableDescriptor.getColumnFamilyCount() == 0) {
-      setFailure("master-create-table", new DoNotRetryIOException("Table " +
-          getTableName().toString() + " should have at least one column family."));
+      setFailure("master-create-table", new DoNotRetryIOException(
+        "Table " + getTableName().toString() + " should have at least one column family."));
       return false;
     }
+    if (!tableName.isSystemTable()) {
+      // do not check rs group for system tables as we may block the bootstrap.
+      Supplier<String> forWhom = () -> "table " + tableName;
+      RSGroupInfo rsGroupInfo = MasterProcedureUtil.checkGroupExists(
+        env.getMasterServices().getRSGroupInfoManager()::getRSGroup,
+        tableDescriptor.getRegionServerGroup(), forWhom);
+      if (rsGroupInfo == null) {
+        // we do not set rs group info on table, check if we have one on namespace
+        String namespace = tableName.getNamespaceAsString();
+        NamespaceDescriptor nd = env.getMasterServices().getClusterSchema().getNamespace(namespace);
+        forWhom = () -> "table " + tableName + "(inherit from namespace)";
+        rsGroupInfo = MasterProcedureUtil.checkGroupExists(
+          env.getMasterServices().getRSGroupInfoManager()::getRSGroup,
+          MasterProcedureUtil.getNamespaceGroup(nd), forWhom);
+      }
+      MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
+    }
 
     return true;
   }
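
The same validation now runs for non-system tables inside prepareCreate() above, using the group
from the table descriptor or, failing that, from the namespace. A sketch under the assumption that
TableDescriptorBuilder exposes a setRegionServerGroup() setter matching the getRegionServerGroup()
accessor read here; admin and the group name appGroup are placeholders:

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("app_ns:t1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setRegionServerGroup("appGroup")   // assumed setter, see note above
        .build();
    // Rejected with a ConstraintException when appGroup is missing or has no servers.
    admin.createTable(td);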
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
index 1e488b6..df8fa2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
@@ -18,14 +18,18 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
+import java.util.Optional;
+import java.util.function.Supplier;
 import java.util.regex.Pattern;
-
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureException;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -186,10 +190,52 @@ public final class MasterProcedureUtil {
    * keep trying. The default proc.getException().unwrapRemoteException
    * doesn't have access to DNRIOE from the procedure2 module.
    */
-  public static IOException unwrapRemoteIOException(Procedure proc) {
+  public static IOException unwrapRemoteIOException(Procedure<?> proc) {
     Exception e = proc.getException().unwrapRemoteException();
     // Do not retry ProcedureExceptions!
     return (e instanceof ProcedureException)? new DoNotRetryIOException(e):
         proc.getException().unwrapRemoteIOException();
   }
+
+  /**
+   * Do not allow creating new tables/namespaces which have an empty rs group, except the default
+   * rs group. Notice that we do not check for online servers, as this is not stable because region
+   * servers can die at any time.
+   */
+  public static void checkGroupNotEmpty(RSGroupInfo rsGroupInfo, Supplier<String> forWhom)
+    throws ConstraintException {
+    if (rsGroupInfo == null || rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
+      // either no rs group is configured or the rs group is explicitly set to default, so there is
+      // no need to check.
+      return;
+    }
+    if (rsGroupInfo.getServers().isEmpty()) {
+      throw new ConstraintException(
+        "No servers in the rsgroup " + rsGroupInfo.getName() + " for " + forWhom.get());
+    }
+  }
+
+  @FunctionalInterface
+  public interface RSGroupGetter {
+    RSGroupInfo get(String groupName) throws IOException;
+  }
+
+  public static RSGroupInfo checkGroupExists(RSGroupGetter getter, Optional<String> optGroupName,
+    Supplier<String> forWhom) throws IOException {
+    if (optGroupName.isPresent()) {
+      String groupName = optGroupName.get();
+      RSGroupInfo group = getter.get(groupName);
+      if (group == null) {
+        throw new ConstraintException(
+          "Region server group " + groupName + " for " + forWhom.get() + " does not exit");
+      }
+      return group;
+    }
+    return null;
+  }
+
+  public static Optional<String> getNamespaceGroup(NamespaceDescriptor namespaceDesc) {
+    return Optional
+      .ofNullable(namespaceDesc.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP));
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
index e3327e2..c52b93d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
@@ -18,10 +18,11 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
+import java.util.Objects;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
-import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -171,24 +172,22 @@ public class ModifyNamespaceProcedure
 
   /**
    * Action before any real action of adding namespace.
-   * @param env MasterProcedureEnv
-   * @throws IOException
    */
   private boolean prepareModify(final MasterProcedureEnv env) throws IOException {
-    if (getTableNamespaceManager(env).doesNamespaceExist(newNsDescriptor.getName()) == false) {
+    if (!getTableNamespaceManager(env).doesNamespaceExist(newNsDescriptor.getName())) {
       setFailure("master-modify-namespace",
         new NamespaceNotFoundException(newNsDescriptor.getName()));
       return false;
     }
-    try {
-      getTableNamespaceManager(env).validateTableAndRegionCount(newNsDescriptor);
-    } catch (ConstraintException e) {
-      setFailure("master-modify-namespace", e);
-      return false;
-    }
+    getTableNamespaceManager(env).validateTableAndRegionCount(newNsDescriptor);
 
     // This is used for rollback
     oldNsDescriptor = getTableNamespaceManager(env).get(newNsDescriptor.getName());
+    if (!Objects.equals(
+      oldNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP),
+      newNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) {
+      checkNamespaceRSGroup(env, newNsDescriptor);
+    }
     return true;
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 8e435dd..3f26807 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-
+import java.util.function.Supplier;
 import org.apache.hadoop.hbase.ConcurrentTableModificationException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -242,8 +243,6 @@ public class ModifyTableProcedure
 
   /**
    * Check conditions before any real action of modifying a table.
-   * @param env MasterProcedureEnv
-   * @throws IOException
    */
   private void prepareModify(final MasterProcedureEnv env) throws IOException {
     // Checks whether the table exists
@@ -285,6 +284,8 @@ public class ModifyTableProcedure
       }
     }
 
+
+
     // Find out whether all column families in unmodifiedTableDescriptor also exists in
     // the modifiedTableDescriptor. This is to determine whether we are safe to rollback.
     final Set<byte[]> oldFamilies = unmodifiedTableDescriptor.getColumnFamilyNames();
@@ -295,6 +296,14 @@ public class ModifyTableProcedure
         break;
       }
     }
+    if (!unmodifiedTableDescriptor.getRegionServerGroup()
+      .equals(modifiedTableDescriptor.getRegionServerGroup())) {
+      Supplier<String> forWhom = () -> "table " + getTableName();
+      RSGroupInfo rsGroupInfo = MasterProcedureUtil.checkGroupExists(
+        env.getMasterServices().getRSGroupInfoManager()::getRSGroup,
+        modifiedTableDescriptor.getRegionServerGroup(), forWhom);
+      MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
+    }
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 0bde67b..353b4d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -20,38 +20,25 @@ package org.apache.hadoop.hbase.rsgroup;
 import com.google.protobuf.Service;
 import java.io.IOException;
 import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Set;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.MasterObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
-// TODO: Encapsulate MasterObserver functions into separate subclass.
+/**
+ * @deprecated Kept here only for compatibility with old clients; all the logic has been moved
+ *             into the core of HBase.
+ */
+@Deprecated
 @CoreCoprocessor
 @InterfaceAudience.Private
-public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
+public class RSGroupAdminEndpoint implements MasterCoprocessor {
   // Only instance of RSGroupInfoManager. RSGroup aware load balancers ask for this instance on
   // their setup.
   private MasterServices master;
@@ -66,13 +53,8 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
     }
 
     master = ((HasMasterServices) env).getMasterServices();
-    groupInfoManager = master.getRSRSGroupInfoManager();
+    groupInfoManager = master.getRSGroupInfoManager();
     groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
-    Class<?> clazz =
-      master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
-    if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
-      throw new IOException("Configured balancer does not support RegionServer groups.");
-    }
     AccessChecker accessChecker = ((HasMasterServices) env).getMasterServices().getAccessChecker();
 
     // set the user-provider.
@@ -89,11 +71,6 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
     return Collections.singleton(groupAdminService);
   }
 
-  @Override
-  public Optional<MasterObserver> getMasterObserver() {
-    return Optional.of(this);
-  }
-
   RSGroupInfoManager getGroupInfoManager() {
     return groupInfoManager;
   }
@@ -102,108 +79,4 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
   RSGroupAdminServiceImpl getGroupAdminService() {
     return groupAdminService;
   }
-
-  /////////////////////////////////////////////////////////////////////////////
-  // MasterObserver overrides
-  /////////////////////////////////////////////////////////////////////////////
-
-  @Override
-  public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    List<ServerName> servers, List<ServerName> notClearedServers) throws IOException {
-    Set<Address> clearedServer =
-      servers.stream().filter(server -> !notClearedServers.contains(server))
-        .map(ServerName::getAddress).collect(Collectors.toSet());
-    if (!clearedServer.isEmpty()) {
-      groupAdminServer.removeServers(clearedServer);
-    }
-  }
-
-  private RSGroupInfo checkGroupExists(Optional<String> optGroupName, Supplier<String> forWhom)
-    throws IOException {
-    if (optGroupName.isPresent()) {
-      String groupName = optGroupName.get();
-      RSGroupInfo group = groupAdminServer.getRSGroupInfo(groupName);
-      if (group == null) {
-        throw new ConstraintException(
-          "Region server group " + groupName + " for " + forWhom.get() + " does not exit");
-      }
-      return group;
-    }
-    return null;
-  }
-
-  private Optional<String> getNamespaceGroup(NamespaceDescriptor namespaceDesc) {
-    return Optional
-      .ofNullable(namespaceDesc.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP));
-  }
-
-  // Do not allow creating new tables/namespaces which has an empty rs group, expect the default rs
-  // group. Notice that we do not check for online servers, as this is not stable because region
-  // servers can die at any time.
-  private void checkGroupNotEmpty(RSGroupInfo rsGroupInfo, Supplier<String> forWhom)
-    throws ConstraintException {
-    if (rsGroupInfo == null || rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
-      // we do not have a rs group config or we explicitly set the rs group to default, then no need
-      // to check.
-      return;
-    }
-    if (rsGroupInfo.getServers().isEmpty()) {
-      throw new ConstraintException(
-        "No servers in the rsgroup " + rsGroupInfo.getName() + " for " + forWhom.get());
-    }
-  }
-
-  @Override
-  public void preCreateTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    TableDescriptor desc, RegionInfo[] regions) throws IOException {
-    if (desc.getTableName().isSystemTable()) {
-      // do not check for system tables as we may block the bootstrap.
-      return;
-    }
-    Supplier<String> forWhom = () -> "table " + desc.getTableName();
-    RSGroupInfo rsGroupInfo = checkGroupExists(desc.getRegionServerGroup(), forWhom);
-    if (rsGroupInfo == null) {
-      // we do not set rs group info on table, check if we have one on namespace
-      String namespace = desc.getTableName().getNamespaceAsString();
-      NamespaceDescriptor nd = master.getClusterSchema().getNamespace(namespace);
-      forWhom = () -> "table " + desc.getTableName() + "(inherit from namespace)";
-      rsGroupInfo = checkGroupExists(getNamespaceGroup(nd), forWhom);
-    }
-    checkGroupNotEmpty(rsGroupInfo, forWhom);
-  }
-
-  @Override
-  public TableDescriptor preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor)
-    throws IOException {
-    if (!currentDescriptor.getRegionServerGroup().equals(newDescriptor.getRegionServerGroup())) {
-      Supplier<String> forWhom = () -> "table " + newDescriptor.getTableName();
-      RSGroupInfo rsGroupInfo = checkGroupExists(newDescriptor.getRegionServerGroup(), forWhom);
-      checkGroupNotEmpty(rsGroupInfo, forWhom);
-    }
-    return MasterObserver.super.preModifyTable(ctx, tableName, currentDescriptor, newDescriptor);
-  }
-
-  private void checkNamespaceGroup(NamespaceDescriptor nd) throws IOException {
-    Supplier<String> forWhom = () -> "namespace " + nd.getName();
-    RSGroupInfo rsGroupInfo = checkGroupExists(getNamespaceGroup(nd), forWhom);
-    checkGroupNotEmpty(rsGroupInfo, forWhom);
-  }
-
-  @Override
-  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    NamespaceDescriptor ns) throws IOException {
-    checkNamespaceGroup(ns);
-  }
-
-  @Override
-  public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor)
-    throws IOException {
-    if (!Objects.equals(
-      currentNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP),
-      newNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) {
-      checkNamespaceGroup(newNsDescriptor);
-    }
-  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index 0f943d0..fb3db84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -35,13 +35,15 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
+import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -67,12 +69,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
  * providing appropriate assignments for user tables.
  */
 @InterfaceAudience.Private
-public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
+public class RSGroupBasedLoadBalancer implements LoadBalancer {
   private static final Logger LOG = LoggerFactory.getLogger(RSGroupBasedLoadBalancer.class);
 
   private Configuration config;
   private ClusterMetrics clusterStatus;
   private MasterServices masterServices;
+  private FavoredNodesManager favoredNodesManager;
   private volatile RSGroupInfoManager rsGroupInfoManager;
   private LoadBalancer internalBalancer;
 
@@ -330,36 +333,42 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
 
   @Override
   public void initialize() throws IOException {
-    try {
+    if (rsGroupInfoManager == null) {
+      rsGroupInfoManager = masterServices.getRSGroupInfoManager();
       if (rsGroupInfoManager == null) {
-        List<RSGroupAdminEndpoint> cps =
-          masterServices.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class);
-        if (cps.size() != 1) {
-          String msg = "Expected one implementation of GroupAdminEndpoint but found " + cps.size();
-          LOG.error(msg);
-          throw new HBaseIOException(msg);
-        }
-        rsGroupInfoManager = cps.get(0).getGroupInfoManager();
-        if(rsGroupInfoManager == null){
-          String msg = "RSGroupInfoManager hasn't been initialized";
-          LOG.error(msg);
-          throw new HBaseIOException(msg);
-        }
-        rsGroupInfoManager.start();
+        String msg = "RSGroupInfoManager hasn't been initialized";
+        LOG.error(msg);
+        throw new HBaseIOException(msg);
       }
-    } catch (IOException e) {
-      throw new HBaseIOException("Failed to initialize GroupInfoManagerImpl", e);
+      rsGroupInfoManager.start();
     }
 
     // Create the balancer
-    Class<? extends LoadBalancer> balancerKlass = config.getClass(HBASE_RSGROUP_LOADBALANCER_CLASS,
-        StochasticLoadBalancer.class, LoadBalancer.class);
-    internalBalancer = ReflectionUtils.newInstance(balancerKlass, config);
+    Class<? extends LoadBalancer> balancerClass;
+    String balancerClassName = config.get(HBASE_RSGROUP_LOADBALANCER_CLASS);
+    if (balancerClassName == null) {
+      balancerClass = config.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
+        LoadBalancerFactory.getDefaultLoadBalancerClass(), LoadBalancer.class);
+    } else {
+      try {
+        balancerClass = Class.forName(balancerClassName).asSubclass(LoadBalancer.class);
+      } catch (ClassNotFoundException e) {
+        throw new IOException(e);
+      }
+    }
+    // avoid infinite nesting
+    if (getClass().isAssignableFrom(balancerClass)) {
+      balancerClass = LoadBalancerFactory.getDefaultLoadBalancerClass();
+    }
+    internalBalancer = ReflectionUtils.newInstance(balancerClass);
+    if (internalBalancer instanceof FavoredNodesPromoter) {
+      favoredNodesManager = new FavoredNodesManager(masterServices);
+    }
+    internalBalancer.setConf(config);
     internalBalancer.setMasterServices(masterServices);
     if(clusterStatus != null) {
       internalBalancer.setClusterMetrics(clusterStatus);
     }
-    internalBalancer.setConf(config);
     internalBalancer.initialize();
   }
 
@@ -402,6 +411,14 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
     this.rsGroupInfoManager = rsGroupInfoManager;
   }
 
+  public LoadBalancer getInternalBalancer() {
+    return internalBalancer;
+  }
+
+  public FavoredNodesManager getFavoredNodesManager() {
+    return favoredNodesManager;
+  }
+
   @Override
   public void postMasterStartupInitialize() {
     this.internalBalancer.postMasterStartupInitialize();
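
For reference, a minimal sketch of the class-resolution order the new initialize() follows: an rsgroup-specific key wins if set, otherwise the general master balancer key is consulted, and in either case a class that is (or extends) the rsgroup wrapper itself is replaced by the factory default so the wrapper never nests inside itself. The Configuration calls are standard Hadoop APIs; the master key value is assumed to match HConstants.HBASE_MASTER_LOADBALANCER_CLASS and the factoryDefault parameter stands in for LoadBalancerFactory.getDefaultLoadBalancerClass().

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;

    final class BalancerClassResolver {
      // Key carried over from the removed RSGroupableBalancer interface.
      static final String RSGROUP_BALANCER_KEY = "hbase.rsgroup.grouploadbalancer.class";
      // Assumed value of HConstants.HBASE_MASTER_LOADBALANCER_CLASS.
      static final String MASTER_BALANCER_KEY = "hbase.master.loadbalancer.class";

      static Class<?> resolve(Configuration conf, Class<?> wrapper, Class<?> factoryDefault)
          throws IOException {
        Class<?> chosen;
        String explicit = conf.get(RSGROUP_BALANCER_KEY);
        if (explicit == null) {
          // No rsgroup-specific override: fall back to the general master setting.
          chosen = conf.getClass(MASTER_BALANCER_KEY, factoryDefault);
        } else {
          try {
            chosen = Class.forName(explicit);
          } catch (ClassNotFoundException e) {
            throw new IOException(e);
          }
        }
        // Avoid infinite nesting: never let the wrapper delegate to itself.
        if (wrapper.isAssignableFrom(chosen)) {
          chosen = factoryDefault;
        }
        return chosen;
      }
    }
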
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java
deleted file mode 100644
index d091b3c..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rsgroup;
-
-import org.apache.hadoop.hbase.master.LoadBalancer;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Marker Interface. RSGroups feature will check for a LoadBalancer
- * marked with this Interface before it runs.
- */
-@InterfaceAudience.Private
-public interface RSGroupableBalancer extends LoadBalancer {
-  /** Config for pluggable load balancers */
-  String HBASE_RSGROUP_LOADBALANCER_CLASS = "hbase.rsgroup.grouploadbalancer.class";
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
index 1fe43f1..df39862 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
@@ -115,7 +115,7 @@ public class TestNamespace {
 
     //verify existence of system tables
     Set<TableName> systemTables = Sets.newHashSet(
-        TableName.META_TABLE_NAME);
+        TableName.META_TABLE_NAME, TableName.valueOf("hbase:rsgroup"));
     List<TableDescriptor> descs = admin.listTableDescriptorsByNamespace(
       Bytes.toBytes(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()));
     assertEquals(systemTables.size(), descs.size());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
index a576adc..fc2e9f7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
@@ -504,14 +504,14 @@ public abstract class AbstractTestDLS {
       for (String oregion : regions)
         LOG.debug("Region still online: " + oregion);
     }
-    assertEquals(1 + existingRegions, regions.size());
+    assertEquals(2 + existingRegions, regions.size());
     LOG.debug("Enabling table\n");
     TEST_UTIL.getAdmin().enableTable(tableName);
     LOG.debug("Waiting for no more RIT\n");
     blockUntilNoRIT();
     LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n");
     regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
-    assertEquals(numRegions + 1 + existingRegions, regions.size());
+    assertEquals(numRegions + 2 + existingRegions, regions.size());
     return table;
   }
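
The expected-count bumps in this and the later test diffs (1 to 2 here, 3 to 4 in TestClusterRestart, 61 to 62 in TestRegionReplicasWithRestartScenarios, and so on) appear to share a single cause: on this branch hbase:rsgroup is created as a system table at startup (see the TestNamespace change above), so it contributes one extra online region. Roughly:

    regions online with the user table disabled = 1 (hbase:meta) + 1 (hbase:rsgroup) = 2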
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 26772ae..71f8a91 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -493,7 +493,7 @@ public class MockNoopMasterServices implements MasterServices {
   }
 
   @Override
-  public RSGroupInfoManager getRSRSGroupInfoManager() {
+  public RSGroupInfoManager getRSGroupInfoManager() {
     return null;
   }
 }
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java
index 5a38234..a8d80b6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java
@@ -43,6 +43,8 @@ public class TestClusterRestart extends AbstractTestRestartCluster {
 
   private static final Logger LOG = LoggerFactory.getLogger(TestClusterRestart.class);
 
+  private static final int NUM_REGIONS = 4;
+
   @Override
   protected boolean splitWALCoordinatedByZk() {
     return true;
@@ -61,7 +63,7 @@ public class TestClusterRestart extends AbstractTestRestartCluster {
     }
 
     List<RegionInfo> allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
-    assertEquals(3, allRegions.size());
+    assertEquals(NUM_REGIONS, allRegions.size());
 
     LOG.info("\n\nShutting down cluster");
     UTIL.shutdownMiniHBaseCluster();
@@ -76,7 +78,7 @@ public class TestClusterRestart extends AbstractTestRestartCluster {
     // Otherwise we're reusing an Connection that has gone stale because
     // the shutdown of the cluster also called shut of the connection.
     allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
-    assertEquals(3, allRegions.size());
+    assertEquals(NUM_REGIONS, allRegions.size());
     LOG.info("\n\nWaiting for tables to be available");
     for (TableName TABLE : TABLES) {
       try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
index 75d9ee1..61720df 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -90,6 +91,7 @@ public class TestMasterMetrics {
     cluster = TEST_UTIL.getHBaseCluster();
     LOG.info("Waiting for active/ready master");
     cluster.waitForActiveAndReadyMaster();
+    TEST_UTIL.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
     master = cluster.getMaster();
   }
 
@@ -131,7 +133,7 @@ public class TestMasterMetrics {
     MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
     boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
     metricsHelper.assertGauge("numRegionServers", 1 + (tablesOnMaster ? 1 : 0), masterSource);
-    metricsHelper.assertGauge("averageLoad", 1, masterSource);
+    metricsHelper.assertGauge("averageLoad", 2, masterSource);
     metricsHelper.assertGauge("numDeadRegionServers", 0, masterSource);
 
     metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), masterSource);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
index fca2866..2d0af3e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
@@ -92,7 +92,7 @@ public class TestMasterRestartAfterDisablingTable {
 
     NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
     assertEquals("The number of regions for the table tableRestart should be 0 and only" +
-      "the catalog table should be present.", 1, regions.size());
+      "the catalog table should be present.", 2, regions.size());
 
     List<MasterThread> masterThreads = cluster.getMasterThreads();
     MasterThread activeMaster = null;
@@ -120,7 +120,7 @@ public class TestMasterRestartAfterDisablingTable {
     log("Verifying there are " + numRegions + " assigned on cluster\n");
     regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
     assertEquals("The assigned regions were not onlined after master" +
-      " switch except for the catalog table.", 5, regions.size());
+      " switch except for the catalog table.", 6, regions.size());
     assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager()
       .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
     ht.close();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
index 0aba487..2dc1b6c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
@@ -121,7 +121,7 @@ public class TestRollingRestart {
         log("Region still online: " + oregion);
       }
     }
-    assertEquals(1, regions.size());
+    assertEquals(2, regions.size());
     log("Enabling table\n");
     TEST_UTIL.getAdmin().enableTable(tableName);
     log("Waiting for no more RIT\n");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java
index 0aa2322..7677eb6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java
@@ -186,7 +186,7 @@ public class TestFavoredStochasticBalancerPickers extends BalancerTestBase {
     regionFinder.setServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
     Cluster cluster = new Cluster(serverAssignments, null, regionFinder, new RackManager(conf));
     LoadOnlyFavoredStochasticBalancer balancer = (LoadOnlyFavoredStochasticBalancer) TEST_UTIL
-        .getMiniHBaseCluster().getMaster().getLoadBalancer();
+        .getMiniHBaseCluster().getMaster().getLoadBalancer().getInternalBalancer();
 
     cluster.sortServersByRegionCount();
     Integer[] servers = cluster.serverIndicesSortedByRegionCount;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
index 7ab5dcc..2503825 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
@@ -59,7 +59,6 @@ public class TestRegionOpen {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestRegionOpen.class);
 
-  @SuppressWarnings("unused")
   private static final Logger LOG = LoggerFactory.getLogger(TestRegionOpen.class);
   private static final int NB_SERVERS = 1;
 
@@ -87,6 +86,7 @@ public class TestRegionOpen {
     final TableName tableName = TableName.valueOf(TestRegionOpen.class.getSimpleName());
     ThreadPoolExecutor exec = getRS().getExecutorService()
         .getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION);
+    HTU.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
     long completed = exec.getCompletedTaskCount();
 
     HTableDescriptor htd = new HTableDescriptor(tableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
index 70df61e..dc0a918 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
@@ -146,7 +146,7 @@ public class TestRegionReplicasWithRestartScenarios {
     checkDuplicates(onlineRegions3);
     assertFalse(res);
     int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-    assertEquals(61, totalRegions);
+    assertEquals(62, totalRegions);
   }
 
   private boolean checkDuplicates(Collection<HRegion> onlineRegions3) throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index b57ff41..ab7ff16 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -225,7 +225,7 @@ public class TestRegionServerMetrics {
 
   @Test
   public void testRegionCount() throws Exception {
-    metricsHelper.assertGauge("regionCount", TABLES_ON_MASTER ? 1 : 2, serverSource);
+    metricsHelper.assertGauge("regionCount", TABLES_ON_MASTER ? 1 : 3, serverSource);
   }
 
   @Test
@@ -335,7 +335,7 @@ public class TestRegionServerMetrics {
     TEST_UTIL.getAdmin().flush(tableName);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    assertGauge("storeCount", TABLES_ON_MASTER ? 1 : 5);
+    assertGauge("storeCount", TABLES_ON_MASTER ? 1 : 6);
     assertGauge("storeFileCount", 1);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
index d3577f2..7ac1a49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
@@ -163,8 +163,7 @@ public class TestRSGroupsOfflineMode {
     });
 
     // Get groupInfoManager from the new active master.
-    RSGroupInfoManager groupMgr = ((MiniHBaseCluster) cluster).getMaster()
-      .getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class).getGroupInfoManager();
+    RSGroupInfoManager groupMgr = ((MiniHBaseCluster) cluster).getMaster().getRSGroupInfoManager();
     // Make sure balancer is in offline mode, since this is what we're testing.
     assertFalse(groupMgr.isOnline());
     // Kill final regionserver to see the failover happens for all tables except GROUP table since
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
index 14ea0b3..e03a823 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
@@ -102,6 +102,7 @@ public class TestTablePermissions {
 
     // Wait for the ACL table to become available
     UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME);
+    UTIL.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
 
     ZKW = new ZKWatcher(UTIL.getConfiguration(),
       "TestTablePermissions", ABORTABLE);
@@ -222,7 +223,7 @@ public class TestTablePermissions {
     // check full load
     Map<byte[], ListMultimap<String, UserPermission>> allPerms = PermissionStorage.loadAll(conf);
     assertEquals("Full permission map should have entries for both test tables",
-        2, allPerms.size());
+        3, allPerms.size());
 
     userPerms = allPerms.get(TEST_TABLE.getName()).get("hubert");
     assertNotNull(userPerms);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 316a321..0ac07d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -24,6 +24,7 @@ import java.util.stream.Stream;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@@ -52,6 +53,7 @@ public class TestHBaseFsckReplication {
   public static void setUp() throws Exception {
     UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
     UTIL.startMiniCluster(1);
+    UTIL.waitTableAvailable(TableName.valueOf("hbase:rsgroup"));
   }
 
   @AfterClass


[hbase] 07/11: HBASE-22819 Automatically migrate the rs group config for table after HBASE-22695 (#498)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit af122d37836340f2ee62692e288b1b3972a66dbc
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Sun Aug 25 11:19:04 2019 +0800

    HBASE-22819 Automatically migrate the rs group config for table after HBASE-22695 (#498)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../apache/hadoop/hbase/rsgroup/RSGroupInfo.java   |   4 +-
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java   |   2 +-
 .../hbase/rsgroup/RSGroupAdminServiceImpl.java     |   2 +-
 .../hadoop/hbase/rsgroup/RSGroupInfoManager.java   |  14 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      | 158 +++++++++++++++---
 .../apache/hadoop/hbase/rsgroup/RSGroupUtil.java   |  39 ++---
 .../hbase/rsgroup/TestMigrateRSGroupInfo.java      | 179 +++++++++++++++++++++
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java     |   3 +-
 8 files changed, 343 insertions(+), 58 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index ad55d1f..817e237 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -19,10 +19,8 @@
 package org.apache.hadoop.hbase.rsgroup;
 
 import java.util.Collection;
-import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -104,7 +102,7 @@ public class RSGroupInfo {
   /**
    * Get list of servers.
    */
-  public Set<Address> getServers() {
+  public SortedSet<Address> getServers() {
     return servers;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 59950e1..1e324e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -61,7 +61,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
           "one server in 'default' RSGroup.";
 
   private MasterServices master;
-  private final RSGroupInfoManager rsGroupInfoManager;
+  final RSGroupInfoManager rsGroupInfoManager;
 
   /** Define the config key of retries threshold when movements failed */
   //made package private for testing
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java
index 6bc4519..749d353 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java
@@ -164,7 +164,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
       }
       checkPermission("getRSGroupInfoOfTable");
       Optional<RSGroupInfo> optGroup =
-          RSGroupUtil.getRSGroupInfo(master, groupAdminServer, tableName);
+        RSGroupUtil.getRSGroupInfo(master, groupAdminServer.rsGroupInfoManager, tableName);
       if (optGroup.isPresent()) {
         builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(optGroup.get())));
       } else {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 28f7c1f..1b9f3ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.rsgroup;
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -68,11 +69,6 @@ public interface RSGroupInfoManager {
   List<RSGroupInfo> listRSGroups() throws IOException;
 
   /**
-   * Refresh/reload the group information from the persistent store
-   */
-  void refresh() throws IOException;
-
-  /**
    * Whether the manager is able to fully return group metadata
    * @return whether the manager is in online mode
    */
@@ -83,4 +79,12 @@ public interface RSGroupInfoManager {
    * @param servers set of servers to remove
    */
   void removeServers(Set<Address> servers) throws IOException;
+
+  /**
+   * Get {@code RSGroupInfo} for the given table.
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. Only for compatibility, where we upgrade
+   *             from a version that stores table names for a rs group in the {@code RSGroupInfo}.
+   */
+  @Deprecated
+  RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index eaf23f3..6725066 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.rsgroup;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.AsyncTable;
@@ -76,6 +78,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -104,9 +107,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   private static final Logger LOG = LoggerFactory.getLogger(RSGroupInfoManagerImpl.class);
 
-  private static final String REASSIGN_WAIT_INTERVAL_KEY = "hbase.rsgroup.reassign.wait";
-  private static final long DEFAULT_REASSIGN_WAIT_INTERVAL = 30 * 1000L;
-
   // Assigned before user tables
   @VisibleForTesting
   static final TableName RSGROUP_TABLE_NAME =
@@ -120,6 +120,9 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   @VisibleForTesting
   static final byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i");
 
+  @VisibleForTesting
+  static final String MIGRATE_THREAD_NAME = "Migrate-RSGroup-Tables";
+
   private static final byte[] ROW_KEY = { 0 };
 
   /** Table descriptor for <code>hbase:rsgroup</code> catalog table */
@@ -140,7 +143,30 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
   // These two Maps are immutable and wholesale replaced on each modification
   // so are safe to access concurrently. See class comment.
-  private volatile Map<String, RSGroupInfo> rsGroupMap = Collections.emptyMap();
+  private static final class RSGroupInfoHolder {
+    final ImmutableMap<String, RSGroupInfo> groupName2Group;
+    final ImmutableMap<TableName, RSGroupInfo> tableName2Group;
+
+    RSGroupInfoHolder() {
+      this(Collections.emptyMap());
+    }
+
+    RSGroupInfoHolder(Map<String, RSGroupInfo> rsGroupMap) {
+      ImmutableMap.Builder<String, RSGroupInfo> group2Name2GroupBuilder = ImmutableMap.builder();
+      ImmutableMap.Builder<TableName, RSGroupInfo> tableName2GroupBuilder = ImmutableMap.builder();
+      rsGroupMap.forEach((groupName, rsGroupInfo) -> {
+        group2Name2GroupBuilder.put(groupName, rsGroupInfo);
+        if (!groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
+          rsGroupInfo.getTables()
+            .forEach(tableName -> tableName2GroupBuilder.put(tableName, rsGroupInfo));
+        }
+      });
+      this.groupName2Group = group2Name2GroupBuilder.build();
+      this.tableName2Group = tableName2GroupBuilder.build();
+    }
+  }
+
+  private volatile RSGroupInfoHolder holder = new RSGroupInfoHolder();
 
   private final MasterServices masterServices;
   private final AsyncClusterConnection conn;
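
The RSGroupInfoHolder introduced above is a standard copy-on-write arrangement: readers dereference the volatile field once and work against an immutable snapshot, while writers rebuild the whole holder under the manager's lock and swap the reference. A generic sketch of the same pattern, with illustrative names that are not HBase classes:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    final class SnapshotRegistry<K, V> {
      // Readers only ever see a fully built, immutable map; swapping the
      // reference is the sole mutation.
      private volatile Map<K, V> snapshot = Collections.emptyMap();

      V get(K key) {                           // lock-free read path
        return snapshot.get(key);
      }

      synchronized void put(K key, V value) {  // writers serialize, copy, then publish
        Map<K, V> next = new HashMap<>(snapshot);
        next.put(key, value);
        snapshot = Collections.unmodifiableMap(next);
      }
    }
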
@@ -160,9 +186,10 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
 
   private synchronized void init() throws IOException {
-    refresh();
+    refresh(false);
     serverEventsListenerThread.start();
     masterServices.getServerManager().registerListener(serverEventsListenerThread);
+    migrate();
   }
 
   static RSGroupInfoManager getInstance(MasterServices master) throws IOException {
@@ -179,6 +206,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   @Override
   public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException {
     checkGroupName(rsGroupInfo.getName());
+    Map<String, RSGroupInfo> rsGroupMap = holder.groupName2Group;
     if (rsGroupMap.get(rsGroupInfo.getName()) != null ||
       rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
       throw new DoNotRetryIOException("Group already exists: " + rsGroupInfo.getName());
@@ -235,7 +263,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       }
       dst.addServer(el);
     }
-    Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
+    Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(holder.groupName2Group);
     newGroupMap.put(src.getName(), src);
     newGroupMap.put(dst.getName(), dst);
     flushConfig(newGroupMap);
@@ -244,7 +272,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
   @Override
   public RSGroupInfo getRSGroupOfServer(Address serverHostPort) throws IOException {
-    for (RSGroupInfo info : rsGroupMap.values()) {
+    for (RSGroupInfo info : holder.groupName2Group.values()) {
       if (info.containsServer(serverHostPort)) {
         return info;
       }
@@ -254,11 +282,12 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
   @Override
   public RSGroupInfo getRSGroup(String groupName) {
-    return rsGroupMap.get(groupName);
+    return holder.groupName2Group.get(groupName);
   }
 
   @Override
   public synchronized void removeRSGroup(String groupName) throws IOException {
+    Map<String, RSGroupInfo> rsGroupMap = holder.groupName2Group;
     if (!rsGroupMap.containsKey(groupName) || groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
       throw new DoNotRetryIOException(
         "Group " + groupName + " does not exist or is a reserved " + "group");
@@ -270,7 +299,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
   @Override
   public List<RSGroupInfo> listRSGroups() {
-    return Lists.newArrayList(rsGroupMap.values());
+    return Lists.newArrayList(holder.groupName2Group.values());
   }
 
   @Override
@@ -298,7 +327,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     }
 
     if (rsGroupInfos.size() > 0) {
-      Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
+      Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(holder.groupName2Group);
       newGroupMap.putAll(rsGroupInfos);
       flushConfig(newGroupMap);
     }
@@ -349,9 +378,90 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     return RSGroupInfoList;
   }
 
-  @Override
-  public void refresh() throws IOException {
-    refresh(false);
+  private void migrate(Collection<RSGroupInfo> groupList) {
+    TableDescriptors tds = masterServices.getTableDescriptors();
+    for (RSGroupInfo groupInfo : groupList) {
+      if (groupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
+        continue;
+      }
+      SortedSet<TableName> failedTables = new TreeSet<>();
+      for (TableName tableName : groupInfo.getTables()) {
+        LOG.debug("Migrating {} in group {}", tableName, groupInfo.getName());
+        TableDescriptor oldTd;
+        try {
+          oldTd = tds.get(tableName);
+        } catch (IOException e) {
+          LOG.warn("Failed to migrate {} in group {}", tableName, groupInfo.getName(), e);
+          failedTables.add(tableName);
+          continue;
+        }
+        if (oldTd == null) {
+          continue;
+        }
+        if (oldTd.getRegionServerGroup().isPresent()) {
+          // either we have already migrated it or that user has set the rs group using the new
+          // code which will set the group directly on table descriptor, skip.
+          LOG.debug("Skip migrating {} since it is already in group {}", tableName,
+            oldTd.getRegionServerGroup().get());
+          continue;
+        }
+        TableDescriptor newTd = TableDescriptorBuilder.newBuilder(oldTd)
+          .setRegionServerGroup(groupInfo.getName()).build();
+        // This is a bit tricky. Since we know that the region server group config in
+        // TableDescriptor will only be used at master side, it is fine to just update the table
+        // descriptor on file system and also the cache, without reopening all the regions. This
+        // will be much faster than the normal modifyTable. And when upgrading, we will update
+        // master first and then region servers, so after all the region servers have been reopened,
+        // the new TableDescriptor will be loaded.
+        try {
+          tds.add(newTd);
+        } catch (IOException e) {
+          LOG.warn("Failed to migrate {} in group {}", tableName, groupInfo.getName(), e);
+          failedTables.add(tableName);
+          continue;
+        }
+      }
+      LOG.debug("Done migrating {}, failed tables {}", groupInfo.getName(), failedTables);
+      synchronized (RSGroupInfoManagerImpl.this) {
+        Map<String, RSGroupInfo> rsGroupMap = holder.groupName2Group;
+        RSGroupInfo currentInfo = rsGroupMap.get(groupInfo.getName());
+        if (currentInfo != null) {
+          RSGroupInfo newInfo =
+            new RSGroupInfo(currentInfo.getName(), currentInfo.getServers(), failedTables);
+          Map<String, RSGroupInfo> newGroupMap = new HashMap<>(rsGroupMap);
+          newGroupMap.put(groupInfo.getName(), newInfo);
+          try {
+            flushConfig(newGroupMap);
+          } catch (IOException e) {
+            LOG.warn("Failed to persist rs group {}", newInfo.getName(), e);
+          }
+        }
+      }
+    }
+  }
+
+  // Migrate the table rs group info from RSGroupInfo into the table descriptor
+  // Notice that we do not want to block the initialization, so this will be done in the background, and
+  // during the migration, the rs group info may be incomplete and cause regions to be misplaced.
+  private void migrate() {
+    Thread migrateThread = new Thread(MIGRATE_THREAD_NAME) {
+
+      @Override
+      public void run() {
+        LOG.info("Start migrating table rs group config");
+        while (!masterServices.isStopped()) {
+          Collection<RSGroupInfo> groups = holder.groupName2Group.values();
+          boolean hasTables = groups.stream().anyMatch(r -> !r.getTables().isEmpty());
+          if (!hasTables) {
+            break;
+          }
+          migrate(groups);
+        }
+        LOG.info("Done migrating table rs group info");
+      }
+    };
+    migrateThread.setDaemon(true);
+    migrateThread.start();
   }
 
   /**
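
A hedged sketch of the overall shape of the background migration added above: a daemon thread keeps making passes until no group still carries a legacy table list (or the master stops), and whatever fails in one pass is retried in the next. The types below are illustrative stand-ins, not HBase classes:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    final class LegacyGroup {
      final String name;
      final List<String> tables = new CopyOnWriteArrayList<>();
      LegacyGroup(String name) { this.name = name; }
    }

    final class BackgroundMigrator {
      private volatile boolean stopped = false;

      void start(List<LegacyGroup> groups) {
        Thread t = new Thread(() -> {
          while (!stopped) {
            // Quit once every group's legacy table list has been drained.
            if (groups.stream().allMatch(g -> g.tables.isEmpty())) {
              break;
            }
            for (LegacyGroup g : groups) {
              // A real pass would first stamp g.name into each table's descriptor and
              // leave any table that failed in the list so the next pass retries it.
              g.tables.clear();
            }
          }
        }, "Migrate-RSGroup-Tables");
        t.setDaemon(true);   // never block master startup on the migration
        t.start();
      }

      void stop() {
        stopped = true;
      }
    }
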
@@ -381,7 +491,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       newGroupMap.put(group.getName(), group);
     }
     resetRSGroupMap(newGroupMap);
-    updateCacheOfRSGroups(rsGroupMap.keySet());
+    updateCacheOfRSGroups(newGroupMap.keySet());
   }
 
   private void flushConfigTable(Map<String, RSGroupInfo> groupMap) throws IOException {
@@ -411,20 +521,20 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   }
 
   private synchronized void flushConfig() throws IOException {
-    flushConfig(this.rsGroupMap);
+    flushConfig(holder.groupName2Group);
   }
 
   private synchronized void flushConfig(Map<String, RSGroupInfo> newGroupMap) throws IOException {
     // For offline mode persistence is still unavailable
     // We're refreshing in-memory state but only for servers in default group
     if (!isOnline()) {
-      if (newGroupMap == this.rsGroupMap) {
+      if (newGroupMap == holder.groupName2Group) {
         // When newGroupMap is this.rsGroupMap itself,
         // do not need to check default group and other groups as followed
         return;
       }
 
-      Map<String, RSGroupInfo> oldGroupMap = Maps.newHashMap(rsGroupMap);
+      Map<String, RSGroupInfo> oldGroupMap = Maps.newHashMap(holder.groupName2Group);
       RSGroupInfo oldDefaultGroup = oldGroupMap.remove(RSGroupInfo.DEFAULT_GROUP);
       RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP);
       if (!oldGroupMap.equals(newGroupMap) /* compare both tables and servers in other groups */ ||
@@ -438,7 +548,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
       // Refresh rsGroupMap
       // according to the inputted newGroupMap (an updated copy of rsGroupMap)
-      rsGroupMap = newGroupMap;
+      this.holder = new RSGroupInfoHolder(newGroupMap);
 
       // Do not need to update tableMap
       // because only the update on servers in default group is allowed above,
@@ -495,8 +605,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
    * Make changes visible. Caller must be synchronized on 'this'.
    */
   private void resetRSGroupMap(Map<String, RSGroupInfo> newRSGroupMap) {
-    // Make maps Immutable.
-    this.rsGroupMap = Collections.unmodifiableMap(newRSGroupMap);
+    this.holder = new RSGroupInfoHolder(newRSGroupMap);
   }
 
   /**
@@ -549,6 +658,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   // Called by ServerEventsListenerThread. Synchronize on this because redoing
   // the rsGroupMap then writing it out.
   private synchronized void updateDefaultServers(SortedSet<Address> servers) {
+    Map<String, RSGroupInfo> rsGroupMap = holder.groupName2Group;
     RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP);
     RSGroupInfo newInfo = new RSGroupInfo(info.getName(), servers);
     HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
@@ -647,6 +757,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
           online = true;
           // flush any inconsistencies between ZK and HTable
           RSGroupInfoManagerImpl.this.flushConfig();
+          // migrate after we are online.
+          migrate();
           return true;
         } catch (Exception e) {
           LOG.warn("Failed to perform check", e);
@@ -725,4 +837,10 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       throw new ConstraintException("RSGroup name should only contain alphanumeric characters");
     }
   }
+
+
+  @Override
+  public RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException {
+    return holder.tableName2Group.get(tableName);
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java
index a08d236..af30049 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java
@@ -34,12 +34,11 @@ final class RSGroupUtil {
   private RSGroupUtil() {
   }
 
-  @FunctionalInterface
-  private interface GetRSGroup {
-    RSGroupInfo get(String groupName) throws IOException;
-  }
-
-  private static Optional<RSGroupInfo> getRSGroupInfo(MasterServices master, GetRSGroup getter,
+  /**
+   * Will try to get the rsgroup from {@link TableDescriptor} first, and then try to get the rsgroup
+   * from the {@link NamespaceDescriptor}. If still not present, return empty.
+   */
+  static Optional<RSGroupInfo> getRSGroupInfo(MasterServices master, RSGroupInfoManager manager,
       TableName tableName) throws IOException {
     TableDescriptor td = master.getTableDescriptors().get(tableName);
     if (td == null) {
@@ -47,11 +46,17 @@ final class RSGroupUtil {
     }
     Optional<String> optGroupNameOfTable = td.getRegionServerGroup();
     if (optGroupNameOfTable.isPresent()) {
-      RSGroupInfo group = getter.get(optGroupNameOfTable.get());
+      RSGroupInfo group = manager.getRSGroup(optGroupNameOfTable.get());
       if (group != null) {
         return Optional.of(group);
       }
     }
+    // for backward compatible, where we may still have table configs in the RSGroupInfo after
+    // upgrading when migrating is still on-going.
+    RSGroupInfo groupFromOldRSGroupInfo = manager.getRSGroupForTable(tableName);
+    if (groupFromOldRSGroupInfo != null) {
+      return Optional.of(groupFromOldRSGroupInfo);
+    }
     ClusterSchema clusterSchema = master.getClusterSchema();
     if (clusterSchema == null) {
       if (TableName.isMetaTableName(tableName)) {
@@ -67,25 +72,7 @@ final class RSGroupUtil {
     if (groupNameOfNs == null) {
       return Optional.empty();
     }
-    return Optional.ofNullable(getter.get(groupNameOfNs));
-  }
-
-  /**
-   * Will try to get the rsgroup from {@link TableDescriptor} first, and then try to get the rsgroup
-   * from the {@link NamespaceDescriptor}. If still not present, return empty.
-   */
-  static Optional<RSGroupInfo> getRSGroupInfo(MasterServices master, RSGroupInfoManager manager,
-      TableName tableName) throws IOException {
-    return getRSGroupInfo(master, manager::getRSGroup, tableName);
-  }
-
-  /**
-   * Will try to get the rsgroup from {@link TableDescriptor} first, and then try to get the rsgroup
-   * from the {@link NamespaceDescriptor}. If still not present, return empty.
-   */
-  static Optional<RSGroupInfo> getRSGroupInfo(MasterServices master, RSGroupAdmin admin,
-      TableName tableName) throws IOException {
-    return getRSGroupInfo(master, admin::getRSGroupInfo, tableName);
+    return Optional.ofNullable(manager.getRSGroup(groupNameOfNs));
   }
 
   /**
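
Condensed, the lookup order getRSGroupInfo() now implements is: the group named on the table descriptor, then the legacy per-table mapping kept by the manager (only populated while the migration described earlier is still running), then the group configured on the namespace. A small sketch with stand-in functional parameters instead of the real MasterServices and RSGroupInfoManager types:

    import java.util.Optional;
    import java.util.function.Function;

    final class GroupLookup {
      static Optional<String> resolve(String table,
          Function<String, Optional<String>> groupOnTableDescriptor,
          Function<String, Optional<String>> legacyGroupForTable,
          Function<String, Optional<String>> groupOnNamespace) {
        Optional<String> fromTd = groupOnTableDescriptor.apply(table);
        if (fromTd.isPresent()) {
          return fromTd;                       // new-style config on the table descriptor wins
        }
        Optional<String> legacy = legacyGroupForTable.apply(table);
        if (legacy.isPresent()) {
          return legacy;                       // still mid-migration, honor the old mapping
        }
        return groupOnNamespace.apply(table);  // finally fall back to the namespace's group
      }
    }
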
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java
new file mode 100644
index 0000000..f61e18a
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import static org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl.META_FAMILY_BYTES;
+import static org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl.META_QUALIFIER_BYTES;
+import static org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Testcase for HBASE-22819
+ */
+@Category({ MediumTests.class })
+public class TestMigrateRSGroupInfo extends TestRSGroupsBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestMigrateRSGroupInfo.class);
+
+  private static String TABLE_NAME_PREFIX = "Table_";
+
+  private static int NUM_TABLES = 10;
+
+  private static byte[] FAMILY = Bytes.toBytes("family");
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setClass(HConstants.MASTER_IMPL, HMasterForTest.class,
+      HMaster.class);
+    setUpTestBeforeClass();
+    for (int i = 0; i < NUM_TABLES; i++) {
+      TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_PREFIX + i), FAMILY);
+    }
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    tearDownAfterClass();
+  }
+
+  private static CountDownLatch RESUME = new CountDownLatch(1);
+
+  public static final class HMasterForTest extends HMaster {
+
+    public HMasterForTest(Configuration conf) throws IOException, KeeperException {
+      super(conf);
+    }
+
+    @Override
+    public TableDescriptors getTableDescriptors() {
+      if (RESUME != null) {
+        for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
+          if (element.getClassName().contains("RSGroupInfoManagerImpl")) {
+            try {
+              RESUME.await();
+            } catch (InterruptedException e) {
+            }
+            RESUME = null;
+            break;
+          }
+        }
+      }
+      return super.getTableDescriptors();
+    }
+  }
+
+  @Test
+  public void testMigrate() throws IOException, InterruptedException {
+    String groupName = name.getMethodName();
+    addGroup(groupName, TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().size() - 1);
+    RSGroupInfo rsGroupInfo = rsGroupAdmin.getRSGroupInfo(groupName);
+    assertTrue(rsGroupInfo.getTables().isEmpty());
+    for (int i = 0; i < NUM_TABLES; i++) {
+      rsGroupInfo.addTable(TableName.valueOf(TABLE_NAME_PREFIX + i));
+    }
+    try (Table table = TEST_UTIL.getConnection().getTable(RSGROUP_TABLE_NAME)) {
+      RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(rsGroupInfo);
+      Put p = new Put(Bytes.toBytes(rsGroupInfo.getName()));
+      p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
+      table.put(p);
+    }
+    TEST_UTIL.getMiniHBaseCluster().stopMaster(0).join();
+    RESUME = new CountDownLatch(1);
+    TEST_UTIL.getMiniHBaseCluster().startMaster();
+
+    // wait until we can get the rs group info for a table
+    TEST_UTIL.waitFor(30000, () -> {
+      try {
+        rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + 0));
+        return true;
+      } catch (IOException e) {
+        return false;
+      }
+    });
+    // confirm that before migrating, we could still get the correct rs group for a table.
+    for (int i = 0; i < NUM_TABLES; i++) {
+      RSGroupInfo info =
+        rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + i));
+      assertEquals(rsGroupInfo.getName(), info.getName());
+      assertEquals(NUM_TABLES, info.getTables().size());
+    }
+    RESUME.countDown();
+    TEST_UTIL.waitFor(60000, () -> {
+      for (int i = 0; i < NUM_TABLES; i++) {
+        TableDescriptor td;
+        try {
+          td = TEST_UTIL.getAdmin().getDescriptor(TableName.valueOf(TABLE_NAME_PREFIX + i));
+        } catch (IOException e) {
+          return false;
+        }
+        if (!rsGroupInfo.getName().equals(td.getRegionServerGroup().orElse(null))) {
+          return false;
+        }
+      }
+      return true;
+    });
+    // make sure that we persist the result to hbase, where we delete all the tables in the rs
+    // group.
+    TEST_UTIL.waitFor(30000, () -> {
+      try (Table table = TEST_UTIL.getConnection().getTable(RSGROUP_TABLE_NAME)) {
+        Result result = table.get(new Get(Bytes.toBytes(rsGroupInfo.getName())));
+        RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo
+          .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES));
+        RSGroupInfo gi = ProtobufUtil.toGroupInfo(proto);
+        return gi.getTables().isEmpty();
+      }
+    });
+    // make sure that the migrate thread has quit.
+    TEST_UTIL.waitFor(30000, () -> Thread.getAllStackTraces().keySet().stream()
+      .noneMatch(t -> t.getName().equals(RSGroupInfoManagerImpl.MIGRATE_THREAD_NAME)));
+    // make sure we could still get the correct rs group info after migration
+    for (int i = 0; i < NUM_TABLES; i++) {
+      RSGroupInfo info =
+        rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + i));
+      assertEquals(rsGroupInfo.getName(), info.getName());
+      assertEquals(NUM_TABLES, info.getTables().size());
+    }
+  }
+}
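
HMasterForTest above pauses the migration by parking the migrate thread inside getTableDescriptors() until the test counts down a latch, which is why the pre-migration assertions can still observe the old per-group table lists. A stripped-down version of that pause-and-release pattern, with illustrative names:

    import java.util.concurrent.CountDownLatch;

    final class PausableWorker {
      private final CountDownLatch resume = new CountDownLatch(1);

      void runInBackground(Runnable work) {
        Thread t = new Thread(() -> {
          try {
            resume.await();          // worker parks here until release() is called
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return;
          }
          work.run();
        }, "pausable-worker");
        t.setDaemon(true);
        t.start();
      }

      void release() {
        resume.countDown();          // let the worker proceed once the checks are done
      }
    }
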
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index b91cd5e..b9885be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -71,7 +71,7 @@ public abstract class TestRSGroupsBase {
   protected final static Random rand = new Random();
 
   //shared, cluster type specific
-  protected static HBaseTestingUtility TEST_UTIL;
+  protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   protected static Admin admin;
   protected static HBaseCluster cluster;
   protected static RSGroupAdminClient rsGroupAdmin;
@@ -90,7 +90,6 @@ public abstract class TestRSGroupsBase {
   protected TableName tableName;
 
   public static void setUpTestBeforeClass() throws Exception {
-    TEST_UTIL = new HBaseTestingUtility();
     TEST_UTIL.getConfiguration().setFloat(
             "hbase.master.balancer.stochastic.tableSkewCost", 6000);
     TEST_UTIL.getConfiguration().set(


[hbase] 09/11: HBASE-22987 Calculate the region servers in default group in foreground (#599)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 5d4786e1de6c0d2461e4c18db0c636bb693e96da
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Wed Sep 11 22:10:52 2019 +0800

    HBASE-22987 Calculate the region servers in default group in foreground (#599)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      | 138 +++++----------------
 1 file changed, 32 insertions(+), 106 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 6725066..7224869 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -30,6 +30,7 @@ import java.util.OptionalLong;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -174,8 +175,6 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   private final RSGroupStartupWorker rsGroupStartupWorker;
   // contains list of groups that were last flushed to persistent store
   private Set<String> prevRSGroups = new HashSet<>();
-  private final ServerEventsListenerThread serverEventsListenerThread =
-    new ServerEventsListenerThread();
 
   private RSGroupInfoManagerImpl(MasterServices masterServices) throws IOException {
     this.masterServices = masterServices;
@@ -184,11 +183,34 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     this.rsGroupStartupWorker = new RSGroupStartupWorker();
   }
 
+  private synchronized void updateDefaultServers() {
+    LOG.info("Updating default servers.");
+    Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(holder.groupName2Group);
+    RSGroupInfo oldDefaultGroupInfo = getRSGroup(RSGroupInfo.DEFAULT_GROUP);
+    assert oldDefaultGroupInfo != null;
+    RSGroupInfo newDefaultGroupInfo =
+      new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers());
+    newDefaultGroupInfo.addAllTables(oldDefaultGroupInfo.getTables());
+    newGroupMap.put(RSGroupInfo.DEFAULT_GROUP, newDefaultGroupInfo);
+    // do not need to persist, as we do not persist default group.
+    resetRSGroupMap(newGroupMap);
+    LOG.info("Updated default servers, {} servers", newDefaultGroupInfo.getServers().size());
+  }
 
   private synchronized void init() throws IOException {
     refresh(false);
-    serverEventsListenerThread.start();
-    masterServices.getServerManager().registerListener(serverEventsListenerThread);
+    masterServices.getServerManager().registerListener(new ServerListener() {
+
+      @Override
+      public void serverAdded(ServerName serverName) {
+        updateDefaultServers();
+      }
+
+      @Override
+      public void serverRemoved(ServerName serverName) {
+        updateDefaultServers();
+      }
+    });
     migrate();
   }
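
The default group is now recomputed synchronously from the ServerListener callbacks rather than by a dedicated listener thread, and the recomputation itself is a set difference: every online server not claimed by some non-default group belongs to 'default'. A minimal sketch, using plain strings in place of HBase's Address type:

    import java.util.Collection;
    import java.util.Set;
    import java.util.TreeSet;

    final class DefaultGroupServers {
      static Set<String> compute(Set<String> onlineServers, Collection<Set<String>> otherGroups) {
        Set<String> claimed = new TreeSet<>();
        otherGroups.forEach(claimed::addAll);   // servers assigned to any non-default group
        Set<String> defaults = new TreeSet<>(onlineServers);
        defaults.removeAll(claimed);            // whatever remains lands in 'default'
        return defaults;
      }
    }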
 
@@ -225,19 +247,11 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   }
 
   /**
-   * @param master the master to get online servers for
    * @return Set of online Servers named for their hostname and port (not ServerName).
    */
-  private static Set<Address> getOnlineServers(final MasterServices master) {
-    Set<Address> onlineServers = new HashSet<Address>();
-    if (master == null) {
-      return onlineServers;
-    }
-
-    for (ServerName server : master.getServerManager().getOnlineServers().keySet()) {
-      onlineServers.add(server.getAddress());
-    }
-    return onlineServers;
+  private Set<Address> getOnlineServers() {
+    return masterServices.getServerManager().getOnlineServers().keySet().stream()
+      .map(ServerName::getAddress).collect(Collectors.toSet());
   }
 
   @Override
@@ -249,8 +263,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     // it. If not 'default' group, add server to 'dst' rsgroup EVEN IF IT IS NOT online (could be a
     // rsgroup of dead servers that are to come back later).
     Set<Address> onlineServers =
-      dst.getName().equals(RSGroupInfo.DEFAULT_GROUP) ? getOnlineServers(this.masterServices)
-        : null;
+      dst.getName().equals(RSGroupInfo.DEFAULT_GROUP) ? getOnlineServers() : null;
     for (Address el : servers) {
       src.removeServer(el);
       if (onlineServers != null) {
@@ -617,25 +630,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     this.prevRSGroups.addAll(currentGroups);
   }
 
-  // Called by getDefaultServers. Presume it has lock in place.
-  private List<ServerName> getOnlineRS() throws IOException {
-    if (masterServices != null) {
-      return masterServices.getServerManager().getOnlineServersList();
-    }
-    LOG.debug("Reading online RS from zookeeper");
-    List<ServerName> servers = new ArrayList<>();
-    try {
-      for (String el : ZKUtil.listChildrenNoWatch(watcher, watcher.getZNodePaths().rsZNode)) {
-        servers.add(ServerName.parseServerName(el));
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Failed to retrieve server list from zookeeper", e);
-    }
-    return servers;
-  }
-
   // Called by ServerEventsListenerThread. Presume it has lock on this manager when it runs.
-  private SortedSet<Address> getDefaultServers() throws IOException {
+  private SortedSet<Address> getDefaultServers() {
     // Build a list of servers in other groups than default group, from rsGroupMap
     Set<Address> serversInOtherGroup = new HashSet<>();
     for (RSGroupInfo group : listRSGroups() /* get from rsGroupMap */) {
@@ -646,7 +642,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
     // Get all online servers from Zookeeper and find out servers in default group
     SortedSet<Address> defaultServers = Sets.newTreeSet();
-    for (ServerName serverName : getOnlineRS()) {
+    for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) {
       Address server = Address.fromParts(serverName.getHostname(), serverName.getPort());
       if (!serversInOtherGroup.contains(server)) { // not in other groups
         defaultServers.add(server);
@@ -655,76 +651,6 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     return defaultServers;
   }
 
-  // Called by ServerEventsListenerThread. Synchronize on this because redoing
-  // the rsGroupMap then writing it out.
-  private synchronized void updateDefaultServers(SortedSet<Address> servers) {
-    Map<String, RSGroupInfo> rsGroupMap = holder.groupName2Group;
-    RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP);
-    RSGroupInfo newInfo = new RSGroupInfo(info.getName(), servers);
-    HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
-    newGroupMap.put(newInfo.getName(), newInfo);
-    resetRSGroupMap(newGroupMap);
-  }
-
-  /**
-   * Calls {@link RSGroupInfoManagerImpl#updateDefaultServers(SortedSet)} to update list of known
-   * servers. Notifications about server changes are received by registering {@link ServerListener}.
-   * As a listener, we need to return immediately, so the real work of updating the servers is done
-   * asynchronously in this thread.
-   */
-  private class ServerEventsListenerThread extends Thread implements ServerListener {
-    private final Logger LOG = LoggerFactory.getLogger(ServerEventsListenerThread.class);
-    private boolean changed = false;
-
-    ServerEventsListenerThread() {
-      setDaemon(true);
-    }
-
-    @Override
-    public void serverAdded(ServerName serverName) {
-      serverChanged();
-    }
-
-    @Override
-    public void serverRemoved(ServerName serverName) {
-      serverChanged();
-    }
-
-    private synchronized void serverChanged() {
-      changed = true;
-      this.notify();
-    }
-
-    @Override
-    public void run() {
-      setName(ServerEventsListenerThread.class.getName() + "-" + masterServices.getServerName());
-      SortedSet<Address> prevDefaultServers = new TreeSet<>();
-      while (isMasterRunning(masterServices)) {
-        try {
-          LOG.info("Updating default servers.");
-          SortedSet<Address> servers = RSGroupInfoManagerImpl.this.getDefaultServers();
-          if (!servers.equals(prevDefaultServers)) {
-            RSGroupInfoManagerImpl.this.updateDefaultServers(servers);
-            prevDefaultServers = servers;
-            LOG.info("Updated with servers: " + servers.size());
-          }
-          try {
-            synchronized (this) {
-              while (!changed) {
-                wait();
-              }
-              changed = false;
-            }
-          } catch (InterruptedException e) {
-            LOG.warn("Interrupted", e);
-          }
-        } catch (IOException e) {
-          LOG.warn("Failed to update default servers", e);
-        }
-      }
-    }
-  }
-
   private class RSGroupStartupWorker extends Thread {
     private final Logger LOG = LoggerFactory.getLogger(RSGroupStartupWorker.class);
     private volatile boolean online = false;
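
The refactor above removes the dedicated ServerEventsListenerThread and recomputes the default group directly from the ServerListener callbacks. Below is a minimal, self-contained sketch of that pattern; the ServerListener interface and string server identifiers are simplified stand-ins, not the actual HBase types, and the class name is hypothetical.

import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentSkipListSet;

public class DefaultGroupRefresher {

  // Simplified stand-in for org.apache.hadoop.hbase.master.ServerListener.
  interface ServerListener {
    void serverAdded(String serverName);
    void serverRemoved(String serverName);
  }

  private final Set<String> onlineServers = new ConcurrentSkipListSet<>();
  private final Set<String> serversInOtherGroups = new ConcurrentSkipListSet<>();
  private volatile SortedSet<String> defaultServers = new TreeSet<>();

  // Callbacks refresh the default group synchronously, instead of waking a
  // dedicated listener thread as the removed code did.
  ServerListener listener() {
    return new ServerListener() {
      @Override
      public void serverAdded(String serverName) {
        onlineServers.add(serverName);
        updateDefaultServers();
      }

      @Override
      public void serverRemoved(String serverName) {
        onlineServers.remove(serverName);
        updateDefaultServers();
      }
    };
  }

  // The default group holds every online server not assigned to another group.
  private synchronized void updateDefaultServers() {
    SortedSet<String> next = new TreeSet<>(onlineServers);
    next.removeAll(serversInOtherGroups);
    defaultServers = next;
    System.out.println("Updated default servers, " + next.size() + " servers");
  }
}

In the patch itself the callbacks invoke updateDefaultServers(), which rebuilds the default RSGroupInfo from getDefaultServers() and applies it via resetRSGroupMap().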


[hbase] 06/11: HBASE-22820 Do not need to persist default rs group now (#482)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d3441b52189236a57a42d663d6a6250207e20e9a
Author: linkaline <li...@gmail.com>
AuthorDate: Fri Aug 16 16:52:41 2019 +0800

    HBASE-22820 Do not need to persist default rs group now (#482)
    
    Signed-off-by: Duo Zhang <zh...@apache.org>
---
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      | 40 +++++++++++++---------
 .../hbase/rsgroup/VerifyingRSGroupAdminClient.java | 25 ++++++++++++++
 2 files changed, 49 insertions(+), 16 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 37f3ce6..eaf23f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -396,11 +396,13 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     }
 
     // populate puts
-    for (RSGroupInfo RSGroupInfo : groupMap.values()) {
-      RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(RSGroupInfo);
-      Put p = new Put(Bytes.toBytes(RSGroupInfo.getName()));
-      p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
-      mutations.add(p);
+    for (RSGroupInfo gi : groupMap.values()) {
+      if (!gi.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
+        RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(gi);
+        Put p = new Put(Bytes.toBytes(gi.getName()));
+        p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
+        mutations.add(p);
+      }
     }
 
     if (mutations.size() > 0) {
@@ -449,7 +451,12 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
     // Make changes visible after having been persisted to the source of truth
     resetRSGroupMap(newGroupMap);
+    saveRSGroupMapToZK(newGroupMap);
 
+    updateCacheOfRSGroups(newGroupMap.keySet());
+  }
+
+  private void saveRSGroupMapToZK(Map<String, RSGroupInfo> newGroupMap) throws IOException {
     try {
       String groupBasePath =
           ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE);
@@ -463,14 +470,16 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
         }
       }
 
-      for (RSGroupInfo RSGroupInfo : newGroupMap.values()) {
-        String znode = ZNodePaths.joinZNode(groupBasePath, RSGroupInfo.getName());
-        RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(RSGroupInfo);
-        LOG.debug("Updating znode: " + znode);
-        ZKUtil.createAndFailSilent(watcher, znode);
-        zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode));
-        zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode,
-          ProtobufUtil.prependPBMagic(proto.toByteArray())));
+      for (RSGroupInfo gi : newGroupMap.values()) {
+        if (!gi.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
+          String znode = ZNodePaths.joinZNode(groupBasePath, gi.getName());
+          RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(gi);
+          LOG.debug("Updating znode: " + znode);
+          ZKUtil.createAndFailSilent(watcher, znode);
+          zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode));
+          zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode,
+              ProtobufUtil.prependPBMagic(proto.toByteArray())));
+        }
       }
       LOG.debug("Writing ZK GroupInfo count: " + zkOps.size());
 
@@ -480,7 +489,6 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       masterServices.abort("Failed to write to rsGroupZNode", e);
       throw new IOException("Failed to write to rsGroupZNode", e);
     }
-    updateCacheOfRSGroups(newGroupMap.keySet());
   }
 
   /**
@@ -540,12 +548,12 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
   // Called by ServerEventsListenerThread. Synchronize on this because redoing
   // the rsGroupMap then writing it out.
-  private synchronized void updateDefaultServers(SortedSet<Address> servers) throws IOException {
+  private synchronized void updateDefaultServers(SortedSet<Address> servers) {
     RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP);
     RSGroupInfo newInfo = new RSGroupInfo(info.getName(), servers);
     HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
     newGroupMap.put(newInfo.getName(), newInfo);
-    flushConfig(newGroupMap);
+    resetRSGroupMap(newGroupMap);
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index a8cd277..28131a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -26,8 +26,11 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.SortedSet;
+import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -133,6 +136,13 @@ public class VerifyingRSGroupAdminClient extends RSGroupAdminClient {
       tds.addAll(admin.listTableDescriptors());
       tds.addAll(admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME));
     }
+    SortedSet<Address> lives = Sets.newTreeSet();
+    for (ServerName sn : conn.getAdmin().getClusterMetrics().getLiveServerMetrics().keySet()) {
+      lives.add(sn.getAddress());
+    }
+    for (ServerName sn : conn.getAdmin().listDecommissionedRegionServers()) {
+      lives.remove(sn.getAddress());
+    }
     try (Table table = conn.getTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME);
         ResultScanner scanner = table.getScanner(new Scan())) {
       for (;;) {
@@ -144,8 +154,22 @@ public class VerifyingRSGroupAdminClient extends RSGroupAdminClient {
           RSGroupInfoManagerImpl.META_FAMILY_BYTES, RSGroupInfoManagerImpl.META_QUALIFIER_BYTES));
         RSGroupInfo rsGroupInfo = ProtobufUtil.toGroupInfo(proto);
         groupMap.put(proto.getName(), RSGroupUtil.fillTables(rsGroupInfo, tds));
+        for(Address address : rsGroupInfo.getServers()){
+          lives.remove(address);
+        }
       }
     }
+    SortedSet<TableName> tables = Sets.newTreeSet();
+    for (TableDescriptor td : conn.getAdmin().listTableDescriptors(Pattern.compile(".*"),
+        true)){
+      String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
+      if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
+        tables.add(td.getTableName());
+      }
+    }
+
+    groupMap.put(RSGroupInfo.DEFAULT_GROUP,
+        new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, lives, tables));
     assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(wrapped.listRSGroups()));
     try {
       String groupBasePath = ZNodePaths.joinZNode(zkw.getZNodePaths().baseZNode, "rsgroup");
@@ -160,6 +184,7 @@ public class VerifyingRSGroupAdminClient extends RSGroupAdminClient {
           zList.add(RSGroupUtil.fillTables(rsGroupInfo, tds));
         }
       }
+      groupMap.remove(RSGroupInfo.DEFAULT_GROUP);
       assertEquals(zList.size(), groupMap.size());
       for (RSGroupInfo rsGroupInfo : zList) {
         assertTrue(groupMap.get(rsGroupInfo.getName()).equals(rsGroupInfo));
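
The rule this change introduces can be summarized as: never write the 'default' group to the hbase:rsgroup table or to ZooKeeper, and derive its membership at read time from live servers minus those claimed by other groups. A small illustrative sketch of that rule follows; the in-memory maps and class name here are stand-ins for the real storage, not HBase APIs.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class DefaultGroupPersistence {
  static final String DEFAULT_GROUP = "default";

  // Persist every group except 'default'; its membership is derived, never stored.
  static Map<String, Set<String>> toPersist(Map<String, Set<String>> groups) {
    Map<String, Set<String>> out = new HashMap<>();
    groups.forEach((name, servers) -> {
      if (!name.equals(DEFAULT_GROUP)) {
        out.put(name, servers);
      }
    });
    return out;
  }

  // On read, the default group is whatever live servers are not claimed by another group.
  static Set<String> rebuildDefault(Set<String> liveServers, Map<String, Set<String>> persisted) {
    Set<String> defaults = new HashSet<>(liveServers);
    persisted.values().forEach(defaults::removeAll);
    return defaults;
  }
}

The VerifyingRSGroupAdminClient change above applies the same derivation when checking the stored state: live servers, minus decommissioned ones, minus servers listed in any persisted group.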


[hbase] 01/11: HBASE-22664 Move protobuf stuff in hbase-rsgroup to hbase-protocol-shaded (#362)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 096d27e69ad019ca48658f472d706e512fca9625
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Jul 9 09:51:19 2019 +0800

    HBASE-22664 Move protobuf stuff in hbase-rsgroup to hbase-protocol-shaded (#362)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 29 +++++++---
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 27 ++++++++++
 .../src/main/protobuf/RSGroup.proto                | 33 ++++++++++++
 .../src/main/protobuf/RSGroupAdmin.proto           |  2 +-
 .../src/main/protobuf/RSGroupAdmin.proto           |  0
 hbase-rsgroup/pom.xml                              | 18 -------
 .../hadoop/hbase/rsgroup/RSGroupAdminClient.java   | 10 ++--
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 10 ++--
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      |  8 +--
 .../hadoop/hbase/rsgroup/RSGroupProtobufUtil.java  | 63 ----------------------
 .../hbase/rsgroup/VerifyingRSGroupAdminClient.java |  5 +-
 11 files changed, 96 insertions(+), 109 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index a73a2bc..6f72004 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -1772,23 +1772,36 @@ public final class ProtobufUtil {
     return ServerName.valueOf(hostname, port, -1L);
   }
 
+  public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) {
+    if (timeRange == null) {
+      timeRange = TimeRange.allTime();
+    }
+    return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()).setTo(timeRange.getMax())
+        .build();
+  }
+
   public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) {
     RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName());
-    for(HBaseProtos.ServerName el: proto.getServersList()) {
+    for (HBaseProtos.ServerName el : proto.getServersList()) {
       RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort()));
     }
-    for(HBaseProtos.TableName pTableName: proto.getTablesList()) {
+    for (HBaseProtos.TableName pTableName : proto.getTablesList()) {
       RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName));
     }
     return RSGroupInfo;
   }
 
-  public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) {
-    if (timeRange == null) {
-      timeRange = TimeRange.allTime();
+  public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) {
+    List<HBaseProtos.TableName> tables = new ArrayList<>(pojo.getTables().size());
+    for (TableName arg : pojo.getTables()) {
+      tables.add(ProtobufUtil.toProtoTableName(arg));
+    }
+    List<HBaseProtos.ServerName> hostports = new ArrayList<>(pojo.getServers().size());
+    for (Address el : pojo.getServers()) {
+      hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname())
+          .setPort(el.getPort()).build());
     }
-    return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin())
-      .setTo(timeRange.getMax())
-      .build();
+    return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports)
+        .addAllTables(tables).build();
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 8108217..24b8dde 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.protobuf.ProtobufMessageConverter;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
@@ -100,6 +101,7 @@ import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
 import org.apache.hadoop.hbase.quotas.ThrottleType;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.security.visibility.Authorizations;
 import org.apache.hadoop.hbase.security.visibility.CellVisibility;
 import org.apache.hadoop.hbase.util.Addressing;
@@ -176,6 +178,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableD
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
@@ -3348,4 +3351,28 @@ public final class ProtobufUtil {
       .build();
   }
 
+  public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) {
+    RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName());
+    for (HBaseProtos.ServerName el : proto.getServersList()) {
+      RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort()));
+    }
+    for (HBaseProtos.TableName pTableName : proto.getTablesList()) {
+      RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName));
+    }
+    return RSGroupInfo;
+  }
+
+  public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) {
+    List<HBaseProtos.TableName> tables = new ArrayList<>(pojo.getTables().size());
+    for (TableName arg : pojo.getTables()) {
+      tables.add(ProtobufUtil.toProtoTableName(arg));
+    }
+    List<HBaseProtos.ServerName> hostports = new ArrayList<>(pojo.getServers().size());
+    for (Address el : pojo.getServers()) {
+      hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname())
+          .setPort(el.getPort()).build());
+    }
+    return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports)
+        .addAllTables(tables).build();
+  }
 }
diff --git a/hbase-protocol-shaded/src/main/protobuf/RSGroup.proto b/hbase-protocol-shaded/src/main/protobuf/RSGroup.proto
new file mode 100644
index 0000000..ede2b13
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/RSGroup.proto
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "RSGroupProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+message RSGroupInfo {
+  required string name = 1;
+  repeated ServerName servers = 4;
+  repeated TableName tables = 3;
+}
diff --git a/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto b/hbase-protocol-shaded/src/main/protobuf/RSGroupAdmin.proto
similarity index 98%
copy from hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto
copy to hbase-protocol-shaded/src/main/protobuf/RSGroupAdmin.proto
index 416097b..1db7136 100644
--- a/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RSGroupAdmin.proto
@@ -18,7 +18,7 @@
 
 package hbase.pb;
 
-option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
 option java_outer_classname = "RSGroupAdminProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
diff --git a/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto
similarity index 100%
rename from hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto
rename to hbase-protocol/src/main/protobuf/RSGroupAdmin.proto
diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml
index 0bef660..09c8e73 100644
--- a/hbase-rsgroup/pom.xml
+++ b/hbase-rsgroup/pom.xml
@@ -44,24 +44,6 @@
         <artifactId>maven-source-plugin</artifactId>
       </plugin>
       <plugin>
-        <groupId>org.xolstice.maven.plugins</groupId>
-        <artifactId>protobuf-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>compile-protoc</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>compile</goal>
-            </goals>
-            <configuration>
-              <additionalProtoPathElements>
-                <additionalProtoPathElement>${basedir}/../hbase-protocol/src/main/protobuf</additionalProtoPathElement>
-              </additionalProtoPathElements>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
         <configuration>
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
index e8a1410..e7ab7f2 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
@@ -18,12 +18,10 @@
 package org.apache.hadoop.hbase.rsgroup;
 
 import com.google.protobuf.ServiceException;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
@@ -70,7 +68,7 @@ public class RSGroupAdminClient implements RSGroupAdmin {
       GetRSGroupInfoResponse resp = stub.getRSGroupInfo(null,
           GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build());
       if(resp.hasRSGroupInfo()) {
-        return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
+        return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
       }
       return null;
     } catch (ServiceException e) {
@@ -85,7 +83,7 @@ public class RSGroupAdminClient implements RSGroupAdmin {
     try {
       GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request);
       if (resp.hasRSGroupInfo()) {
-        return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
+        return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
       }
       return null;
     } catch (ServiceException e) {
@@ -167,7 +165,7 @@ public class RSGroupAdminClient implements RSGroupAdmin {
           ListRSGroupInfosRequest.getDefaultInstance()).getRSGroupInfoList();
       List<RSGroupInfo> result = new ArrayList<>(resp.size());
       for(RSGroupProtos.RSGroupInfo entry : resp) {
-        result.add(RSGroupProtobufUtil.toGroupInfo(entry));
+        result.add(ProtobufUtil.toGroupInfo(entry));
       }
       return result;
     } catch (ServiceException e) {
@@ -186,7 +184,7 @@ public class RSGroupAdminClient implements RSGroupAdmin {
     try {
       GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request);
       if (resp.hasRSGroupInfo()) {
-        return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
+        return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
       }
       return null;
     } catch (ServiceException e) {
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 090ac6e..8c6e010 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.rsgroup;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
-
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;
@@ -29,7 +28,6 @@ import java.util.List;
 import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -164,7 +162,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
         checkPermission("getRSGroupInfo");
         RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
         if (rsGroupInfo != null) {
-          builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(rsGroupInfo));
+          builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
         }
         if (master.getMasterCoprocessorHost() != null) {
           master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
@@ -189,7 +187,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
         checkPermission("getRSGroupInfoOfTable");
         RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName);
         if (RSGroupInfo != null) {
-          builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo));
+          builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
         }
         if (master.getMasterCoprocessorHost() != null) {
           master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
@@ -326,7 +324,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
         }
         checkPermission("listRSGroup");
         for (RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) {
-          builder.addRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo));
+          builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
         }
         if (master.getMasterCoprocessorHost() != null) {
           master.getMasterCoprocessorHost().postListRSGroups();
@@ -352,7 +350,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
         checkPermission("getRSGroupInfoOfServer");
         RSGroupInfo info = groupAdminServer.getRSGroupOfServer(hp);
         if (info != null) {
-          builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(info));
+          builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(info));
         }
         if (master.getMasterCoprocessorHost() != null) {
           master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index b2d168a..b54f088 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -360,7 +360,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
         }
         RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo
           .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES));
-        rsGroupInfoList.add(RSGroupProtobufUtil.toGroupInfo(proto));
+        rsGroupInfoList.add(ProtobufUtil.toGroupInfo(proto));
       }
     }
     return rsGroupInfoList;
@@ -383,7 +383,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
             ByteArrayInputStream bis =
               new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
             RSGroupInfoList
-              .add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
+              .add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
           }
         }
         LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size());
@@ -459,7 +459,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
     // populate puts
     for (RSGroupInfo RSGroupInfo : groupMap.values()) {
-      RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo);
+      RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(RSGroupInfo);
       Put p = new Put(Bytes.toBytes(RSGroupInfo.getName()));
       p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
       mutations.add(p);
@@ -532,7 +532,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
       for (RSGroupInfo RSGroupInfo : newGroupMap.values()) {
         String znode = ZNodePaths.joinZNode(groupBasePath, RSGroupInfo.getName());
-        RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo);
+        RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(RSGroupInfo);
         LOG.debug("Updating znode: " + znode);
         ZKUtil.createAndFailSilent(watcher, znode);
         zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode));
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java
deleted file mode 100644
index 56e35e7..0000000
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rsgroup;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
-import org.apache.yetus.audience.InterfaceAudience;
-
-@InterfaceAudience.Private
-final class RSGroupProtobufUtil {
-  private RSGroupProtobufUtil() {
-  }
-
-  static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) {
-    RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName());
-    for(HBaseProtos.ServerName el: proto.getServersList()) {
-      RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort()));
-    }
-    for(HBaseProtos.TableName pTableName: proto.getTablesList()) {
-      RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName));
-    }
-    return RSGroupInfo;
-  }
-
-  static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) {
-    List<HBaseProtos.TableName> tables = new ArrayList<>(pojo.getTables().size());
-    for(TableName arg: pojo.getTables()) {
-      tables.add(ProtobufUtil.toProtoTableName(arg));
-    }
-    List<HBaseProtos.ServerName> hostports = new ArrayList<>(pojo.getServers().size());
-    for(Address el: pojo.getServers()) {
-      hostports.add(HBaseProtos.ServerName.newBuilder()
-          .setHostName(el.getHostname())
-          .setPort(el.getPort())
-          .build());
-    }
-    return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName())
-        .addAllServers(hostports)
-        .addAllTables(tables).build();
-  }
-}
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index 88a4339..2ad30e4 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -129,7 +128,7 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
               result.getValue(
                   RSGroupInfoManager.META_FAMILY_BYTES,
                   RSGroupInfoManager.META_QUALIFIER_BYTES));
-      groupMap.put(proto.getName(), RSGroupProtobufUtil.toGroupInfo(proto));
+      groupMap.put(proto.getName(), ProtobufUtil.toGroupInfo(proto));
     }
     Assert.assertEquals(Sets.newHashSet(groupMap.values()),
         Sets.newHashSet(wrapped.listRSGroups()));
@@ -141,7 +140,7 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
           ProtobufUtil.expectPBMagicPrefix(data);
           ByteArrayInputStream bis = new ByteArrayInputStream(
               data, ProtobufUtil.lengthOfPBMagic(), data.length);
-          zList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
+          zList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
         }
       }
       Assert.assertEquals(zList.size(), groupMap.size());
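
With the conversion helpers now living in the shaded ProtobufUtil, callers no longer need the removed RSGroupProtobufUtil. A hedged round-trip sketch is below; it assumes an HBase server/client classpath, and the group name and server address are made up for illustration.

import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupProtos;

public class RSGroupProtoRoundTrip {
  public static void main(String[] args) throws Exception {
    RSGroupInfo group = new RSGroupInfo("analytics");            // hypothetical group name
    group.addServer(Address.fromParts("rs1.example.com", 16020)); // hypothetical server

    // POJO -> protobuf, e.g. before writing to hbase:rsgroup or a znode.
    RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(group);
    byte[] serialized = proto.toByteArray();

    // protobuf -> POJO, e.g. after reading the stored bytes back.
    RSGroupInfo restored =
      ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(serialized));
    System.out.println(restored.getName() + " -> " + restored.getServers());
  }
}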


[hbase] 05/11: HBASE-22809 Allow creating table in group when rs group contains no live servers (#464)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9f4e59438874466cfbf9c97872a0b2267a62116e
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Fri Aug 9 09:25:00 2019 +0800

    HBASE-22809 Allow creating table in group when rs group contains no live servers (#464)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 119 ++++++++++++---------
 .../hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java   |   4 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBasics.java   |  42 --------
 3 files changed, 71 insertions(+), 94 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 3c4530f..a2a5623 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -21,11 +21,12 @@ import com.google.protobuf.Service;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
@@ -68,7 +69,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
     groupInfoManager = RSGroupInfoManagerImpl.getInstance(master);
     groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
     Class<?> clazz =
-        master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
+      master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
     if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
       throw new IOException("Configured balancer does not support RegionServer groups.");
     }
@@ -108,85 +109,101 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
 
   @Override
   public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      List<ServerName> servers, List<ServerName> notClearedServers) throws IOException {
+    List<ServerName> servers, List<ServerName> notClearedServers) throws IOException {
     Set<Address> clearedServer =
-        servers.stream().filter(server -> !notClearedServers.contains(server))
-            .map(ServerName::getAddress).collect(Collectors.toSet());
+      servers.stream().filter(server -> !notClearedServers.contains(server))
+        .map(ServerName::getAddress).collect(Collectors.toSet());
     if (!clearedServer.isEmpty()) {
       groupAdminServer.removeServers(clearedServer);
     }
   }
 
-  private void checkGroupExists(Optional<String> optGroupName) throws IOException {
+  private RSGroupInfo checkGroupExists(Optional<String> optGroupName, Supplier<String> forWhom)
+    throws IOException {
     if (optGroupName.isPresent()) {
       String groupName = optGroupName.get();
-      if (groupAdminServer.getRSGroupInfo(groupName) == null) {
-        throw new ConstraintException("Region server group " + groupName + " does not exit");
+      RSGroupInfo group = groupAdminServer.getRSGroupInfo(groupName);
+      if (group == null) {
+        throw new ConstraintException(
+          "Region server group " + groupName + " for " + forWhom.get() + " does not exist");
       }
+      return group;
     }
+    return null;
   }
 
-  private boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException {
-    RSGroupInfo rsGroupInfo;
-    Optional<String> optGroupName = desc.getRegionServerGroup();
-    if (optGroupName.isPresent()) {
-      String groupName = optGroupName.get();
-      if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
-        // do not check for default group
-        return true;
-      }
-      rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
-      if (rsGroupInfo == null) {
-        throw new ConstraintException(
-            "RSGroup " + groupName + " for table " + desc.getTableName() + " does not exist");
-      }
-    } else {
-      NamespaceDescriptor nd =
-          master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString());
-      String groupNameOfNs = nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
-      if (groupNameOfNs == null || groupNameOfNs.equals(RSGroupInfo.DEFAULT_GROUP)) {
-        // do not check for default group
-        return true;
-      }
-      rsGroupInfo = groupAdminServer.getRSGroupInfo(groupNameOfNs);
-      if (rsGroupInfo == null) {
-        throw new ConstraintException("RSGroup " + groupNameOfNs + " for table " +
-            desc.getTableName() + "(inherit from namespace) does not exist");
-      }
+  private Optional<String> getNamespaceGroup(NamespaceDescriptor namespaceDesc) {
+    return Optional
+      .ofNullable(namespaceDesc.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP));
+  }
+
+  // Do not allow creating new tables/namespaces that have an empty rs group, except the default rs
+  // group. Notice that we do not check for online servers, as this is not stable because region
+  // servers can die at any time.
+  private void checkGroupNotEmpty(RSGroupInfo rsGroupInfo, Supplier<String> forWhom)
+    throws ConstraintException {
+    if (rsGroupInfo == null || rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
+      // we do not have a rs group config or we explicitly set the rs group to default, then no need
+      // to check.
+      return;
+    }
+    if (rsGroupInfo.getServers().isEmpty()) {
+      throw new ConstraintException(
+        "No servers in the rsgroup " + rsGroupInfo.getName() + " for " + forWhom.get());
     }
-    return master.getServerManager().createDestinationServersList().stream()
-        .anyMatch(onlineServer -> rsGroupInfo.containsServer(onlineServer.getAddress()));
   }
 
   @Override
   public void preCreateTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      TableDescriptor desc, RegionInfo[] regions) throws IOException {
-    checkGroupExists(desc.getRegionServerGroup());
-    if (!desc.getTableName().isSystemTable() && !rsgroupHasServersOnline(desc)) {
-      throw new HBaseIOException("No online servers in the rsgroup for " + desc);
+    TableDescriptor desc, RegionInfo[] regions) throws IOException {
+    if (desc.getTableName().isSystemTable()) {
+      // do not check for system tables as we may block the bootstrap.
+      return;
+    }
+    Supplier<String> forWhom = () -> "table " + desc.getTableName();
+    RSGroupInfo rsGroupInfo = checkGroupExists(desc.getRegionServerGroup(), forWhom);
+    if (rsGroupInfo == null) {
+      // we do not set rs group info on table, check if we have one on namespace
+      String namespace = desc.getTableName().getNamespaceAsString();
+      NamespaceDescriptor nd = master.getClusterSchema().getNamespace(namespace);
+      forWhom = () -> "table " + desc.getTableName() + "(inherit from namespace)";
+      rsGroupInfo = checkGroupExists(getNamespaceGroup(nd), forWhom);
     }
+    checkGroupNotEmpty(rsGroupInfo, forWhom);
   }
 
   @Override
   public TableDescriptor preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor)
-      throws IOException {
-    checkGroupExists(newDescriptor.getRegionServerGroup());
+    TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor)
+    throws IOException {
+    if (!currentDescriptor.getRegionServerGroup().equals(newDescriptor.getRegionServerGroup())) {
+      Supplier<String> forWhom = () -> "table " + newDescriptor.getTableName();
+      RSGroupInfo rsGroupInfo = checkGroupExists(newDescriptor.getRegionServerGroup(), forWhom);
+      checkGroupNotEmpty(rsGroupInfo, forWhom);
+    }
     return MasterObserver.super.preModifyTable(ctx, tableName, currentDescriptor, newDescriptor);
   }
 
+  private void checkNamespaceGroup(NamespaceDescriptor nd) throws IOException {
+    Supplier<String> forWhom = () -> "namespace " + nd.getName();
+    RSGroupInfo rsGroupInfo = checkGroupExists(getNamespaceGroup(nd), forWhom);
+    checkGroupNotEmpty(rsGroupInfo, forWhom);
+  }
+
   @Override
   public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      NamespaceDescriptor ns) throws IOException {
-    checkGroupExists(
-      Optional.ofNullable(ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP)));
+    NamespaceDescriptor ns) throws IOException {
+    checkNamespaceGroup(ns);
   }
 
   @Override
   public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor)
-      throws IOException {
-    checkGroupExists(Optional
-        .ofNullable(newNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP)));
+    NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor)
+    throws IOException {
+    if (!Objects.equals(
+      currentNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP),
+      newNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) {
+      checkNamespaceGroup(newNsDescriptor);
+    }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
index 7471458..aff591f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
@@ -152,12 +152,14 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase {
     String nsName = tablePrefix + "_foo";
     String groupName = tablePrefix + "_foo";
     LOG.info("testNamespaceConstraint");
-    rsGroupAdmin.addRSGroup(groupName);
+    addGroup(groupName, 1);
     assertTrue(observer.preAddRSGroupCalled);
     assertTrue(observer.postAddRSGroupCalled);
 
     admin.createNamespace(NamespaceDescriptor.create(nsName)
       .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, groupName).build());
+    RSGroupInfo rsGroupInfo = rsGroupAdmin.getRSGroupInfo(groupName);
+    rsGroupAdmin.moveServers(rsGroupInfo.getServers(), RSGroupInfo.DEFAULT_GROUP);
     // test removing a referenced group
     try {
       rsGroupAdmin.removeRSGroup(groupName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
index e3cb54e..67ed2a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
@@ -20,11 +20,8 @@ package org.apache.hadoop.hbase.rsgroup;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -137,45 +134,6 @@ public class TestRSGroupsBasics extends TestRSGroupsBase {
   }
 
   @Test
-  public void testCreateWhenRsgroupNoOnlineServers() throws Exception {
-    LOG.info("testCreateWhenRsgroupNoOnlineServers");
-
-    // set rsgroup has no online servers and test create table
-    final RSGroupInfo appInfo = addGroup("appInfo", 1);
-    Iterator<Address> iterator = appInfo.getServers().iterator();
-    List<ServerName> serversToDecommission = new ArrayList<>();
-    ServerName targetServer = getServerName(iterator.next());
-    assertTrue(master.getServerManager().getOnlineServers().containsKey(targetServer));
-    serversToDecommission.add(targetServer);
-    admin.decommissionRegionServers(serversToDecommission, true);
-    assertEquals(1, admin.listDecommissionedRegionServers().size());
-
-    final TableName tableName = TableName.valueOf(tablePrefix + "_ns", name.getMethodName());
-    admin.createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString())
-      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, appInfo.getName()).build());
-    final TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
-      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build();
-    try {
-      admin.createTable(desc);
-      fail("Shouldn't create table successfully!");
-    } catch (Exception e) {
-      LOG.debug("create table error", e);
-    }
-
-    // recommission and test create table
-    admin.recommissionRegionServer(targetServer, null);
-    assertEquals(0, admin.listDecommissionedRegionServers().size());
-    admin.createTable(desc);
-    // wait for created table to be assigned
-    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() throws Exception {
-        return getTableRegionMap().get(desc.getTableName()) != null;
-      }
-    });
-  }
-
-  @Test
   public void testDefaultNamespaceCreateAndAssign() throws Exception {
     LOG.info("testDefaultNamespaceCreateAndAssign");
     String tableName = tablePrefix + "_testCreateAndAssign";
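
The net effect of the change is that table and namespace creation now only requires the referenced rsgroup to exist and to have servers configured; whether those servers are currently online is deliberately not checked. A minimal sketch of that precondition, with ConstraintException and the group lookup as simplified stand-ins rather than the actual coprocessor code:

import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;

public class GroupPreCheck {
  static final String DEFAULT_GROUP = "default";

  static class ConstraintException extends Exception {
    ConstraintException(String msg) { super(msg); }
  }

  static void checkGroupForCreate(Optional<String> groupName,
      Map<String, Set<String>> groups, Supplier<String> forWhom) throws ConstraintException {
    if (!groupName.isPresent() || groupName.get().equals(DEFAULT_GROUP)) {
      return; // no explicit group, or the default group: nothing to validate
    }
    Set<String> servers = groups.get(groupName.get());
    if (servers == null) {
      throw new ConstraintException(
        "Region server group " + groupName.get() + " for " + forWhom.get() + " does not exist");
    }
    if (servers.isEmpty()) {
      throw new ConstraintException(
        "No servers in the rsgroup " + groupName.get() + " for " + forWhom.get());
    }
    // Deliberately no online check: servers in the group may come back later.
  }
}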


[hbase] 04/11: HBASE-22695 Store the rsgroup of a table in table configuration (#426)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 93d38fad03ddec8c55ef1c9f490029bacec2c501
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Sat Aug 3 07:53:27 2019 +0800

    HBASE-22695 Store the rsgroup of a table in table configuration (#426)
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../org/apache/hadoop/hbase/HTableDescriptor.java  |   6 +
 .../hadoop/hbase/client/TableDescriptor.java       |   8 +
 .../hbase/client/TableDescriptorBuilder.java       |  19 ++
 .../apache/hadoop/hbase/rsgroup/RSGroupInfo.java   |  42 ++-
 .../org/apache/hadoop/hbase/master/HMaster.java    |   4 +-
 .../apache/hadoop/hbase/master/LoadBalancer.java   |  49 +---
 .../hbase/master/assignment/AssignmentManager.java |   6 +-
 .../apache/hadoop/hbase/rsgroup/RSGroupAdmin.java  |  23 --
 .../hadoop/hbase/rsgroup/RSGroupAdminClient.java   |  13 +-
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 152 ++++------
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java   | 311 ++++++---------------
 .../hbase/rsgroup/RSGroupAdminServiceImpl.java     | 111 ++++++--
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java    |  47 ++--
 .../hadoop/hbase/rsgroup/RSGroupInfoManager.java   |  23 --
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      | 115 +-------
 .../apache/hadoop/hbase/rsgroup/RSGroupUtil.java   | 113 ++++++++
 .../hadoop/hbase/master/TestRegionPlacement2.java  |   6 +-
 .../balancer/RSGroupableBalancerTestBase.java      |  84 +++---
 .../balancer/TestRSGroupBasedLoadBalancer.java     |  42 ++-
 ...lancerWithStochasticLoadBalancerAsInternal.java |   4 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java   |   1 -
 .../hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java   | 104 +------
 .../hadoop/hbase/rsgroup/TestRSGroupsBalance.java  |  20 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java     |   8 +-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java     |   6 +-
 .../hbase/rsgroup/VerifyingRSGroupAdminClient.java |  67 +++--
 26 files changed, 590 insertions(+), 794 deletions(-)
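
The diff below adds a per-table rsgroup setting to TableDescriptor and TableDescriptorBuilder. As a usage sketch, assuming an HBase client classpath and with hypothetical table and group names, a table can carry its group in its own configuration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TableRSGroupExample {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
      // Stored in the table configuration; regions are placed only on this group's servers.
      .setRegionServerGroup("analytics")
      .build();

    // If no group is set, the table falls back to RSGroupInfo.DEFAULT_GROUP.
    System.out.println(desc.getRegionServerGroup().orElse("default"));
  }
}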

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 8866eba..188bed6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -23,6 +23,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -987,4 +988,9 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
   protected ModifyableTableDescriptor getDelegateeForModification() {
     return delegatee;
   }
+
+  @Override
+  public Optional<String> getRegionServerGroup() {
+    return delegatee.getRegionServerGroup();
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
index fc5e69e..a452387 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -23,6 +23,7 @@ import java.util.Collection;
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Stream;
 import org.apache.hadoop.hbase.HConstants;
@@ -184,6 +185,13 @@ public interface TableDescriptor {
   String getOwnerString();
 
   /**
+   * Get the region server group this table belongs to. The regions of this table will be placed
+   * only on the region servers within this group. If not present, will be placed on
+   * {@link org.apache.hadoop.hbase.rsgroup.RSGroupInfo#DEFAULT_GROUP}.
+   */
+  Optional<String> getRegionServerGroup();
+
+  /**
    * Getter for accessing the metadata associated with the key.
    *
    * @param key The key.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index 037a7f8..09ee0c5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -188,6 +189,9 @@ public class TableDescriptorBuilder {
   private static final Bytes PRIORITY_KEY
           = new Bytes(Bytes.toBytes(PRIORITY));
 
+  private static final Bytes RSGROUP_KEY =
+      new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP));
+
   /**
    * Relative priority of the table used for rpc scheduling
    */
@@ -537,6 +541,11 @@ public class TableDescriptorBuilder {
     return this;
   }
 
+  public TableDescriptorBuilder setRegionServerGroup(String group) {
+    desc.setValue(RSGROUP_KEY, new Bytes(Bytes.toBytes(group)));
+    return this;
+  }
+
   public TableDescriptor build() {
     return new ModifyableTableDescriptor(desc);
   }
@@ -1577,6 +1586,16 @@ public class TableDescriptorBuilder {
     public int getColumnFamilyCount() {
       return families.size();
     }
+
+    @Override
+    public Optional<String> getRegionServerGroup() {
+      Bytes value = values.get(RSGROUP_KEY);
+      if (value != null) {
+        return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
+      } else {
+        return Optional.empty();
+      }
+    }
   }
 
   private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 25e827d..ad55d1f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -34,21 +34,38 @@ import org.apache.yetus.audience.InterfaceAudience;
 public class RSGroupInfo {
   public static final String DEFAULT_GROUP = "default";
   public static final String NAMESPACE_DESC_PROP_GROUP = "hbase.rsgroup.name";
+  public static final String TABLE_DESC_PROP_GROUP = "hbase.rsgroup.name";
 
   private final String name;
   // Keep servers in a sorted set so has an expected ordering when displayed.
   private final SortedSet<Address> servers;
   // Keep tables sorted too.
+  /**
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in
+   *             the configuration of a table so this will be removed.
+   */
+  @Deprecated
   private final SortedSet<TableName> tables;
 
   public RSGroupInfo(String name) {
     this(name, new TreeSet<Address>(), new TreeSet<TableName>());
   }
 
+  RSGroupInfo(String name, SortedSet<Address> servers) {
+    this.name = name;
+    this.servers = servers == null ? new TreeSet<>() : new TreeSet<>(servers);
+    this.tables = new TreeSet<>();
+  }
+
+  /**
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information for a table will be
+   *             stored in the configuration of a table so this will be removed.
+   */
+  @Deprecated
   RSGroupInfo(String name, SortedSet<Address> servers, SortedSet<TableName> tables) {
     this.name = name;
     this.servers = (servers == null) ? new TreeSet<>() : new TreeSet<>(servers);
-    this.tables  = (tables  == null) ? new TreeSet<>() : new TreeSet<>(tables);
+    this.tables = (tables == null) ? new TreeSet<>() : new TreeSet<>(tables);
   }
 
   public RSGroupInfo(RSGroupInfo src) {
@@ -100,23 +117,46 @@ public class RSGroupInfo {
 
   /**
    * Get set of tables that are members of the group.
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in
+   *             the configuration of a table so this will be removed.
    */
+  @Deprecated
   public SortedSet<TableName> getTables() {
     return tables;
   }
 
+  /**
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in
+   *             the configuration of a table so this will be removed.
+   */
+  @Deprecated
   public void addTable(TableName table) {
     tables.add(table);
   }
 
+  /**
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in
+   *             the configuration of a table so this will be removed.
+   */
+  @Deprecated
   public void addAllTables(Collection<TableName> arg) {
     tables.addAll(arg);
   }
 
+  /**
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in
+   *             the configuration of a table so this will be removed.
+   */
+  @Deprecated
   public boolean containsTable(TableName table) {
     return tables.contains(table);
   }
 
+  /**
+   * @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in
+   *             the configuration of a table so this will be removed.
+   */
+  @Deprecated
   public boolean removeTable(TableName table) {
     return tables.remove(table);
   }
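With the table set on RSGroupInfo deprecated above, a table's group is now derived from its descriptor rather than from RSGroupInfo#getTables(). A hedged sketch of the client-side lookup; 'admin' and 'tableName' are assumed handles, not part of this patch.

    // Resolve the group of an existing table from its descriptor.
    TableDescriptor td = admin.getDescriptor(tableName);
    String group = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
    // The master additionally honours the namespace-level hbase.rsgroup.name
    // before defaulting, as RSGroupAdminEndpoint does further down in this patch.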
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index d077514..1562d92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2013,7 +2013,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   // Replace with an async implementation from which you can get
   // a success/failure result.
   @VisibleForTesting
-  public void move(final byte[] encodedRegionName, byte[] destServerName) throws HBaseIOException {
+  public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException {
     RegionState regionState = assignmentManager.getRegionStates().
       getRegionState(Bytes.toString(encodedRegionName));
 
@@ -3625,7 +3625,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    * @param servers Region servers to decommission.
    */
   public void decommissionRegionServers(final List<ServerName> servers, final boolean offload)
-      throws HBaseIOException {
+      throws IOException {
     List<ServerName> serversAdded = new ArrayList<>(servers.size());
     // Place the decommission marker first.
     String parentZnode = getZooKeeper().getZNodePaths().drainingZNode;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 816636f..0fc544a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -19,12 +19,12 @@
 package org.apache.hadoop.hbase.master;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
+import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
@@ -65,95 +65,72 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse
   ServerName BOGUS_SERVER_NAME = ServerName.valueOf("localhost,1,1");
 
   /**
-   * Set the current cluster status.  This allows a LoadBalancer to map host name to a server
-   * @param st
+   * Set the current cluster status. This allows a LoadBalancer to map host name to a server
    */
   void setClusterMetrics(ClusterMetrics st);
 
   /**
    * Pass RegionStates and allow balancer to set the current cluster load.
-   * @param ClusterLoad
    */
   void setClusterLoad(Map<TableName, Map<ServerName, List<RegionInfo>>> ClusterLoad);
 
   /**
    * Set the master service.
-   * @param masterServices
    */
   void setMasterServices(MasterServices masterServices);
 
   /**
    * Perform the major balance operation
-   * @param tableName
-   * @param clusterState
    * @return List of plans
    */
-  List<RegionPlan> balanceCluster(TableName tableName, Map<ServerName,
-      List<RegionInfo>> clusterState) throws HBaseIOException;
+  List<RegionPlan> balanceCluster(TableName tableName,
+      Map<ServerName, List<RegionInfo>> clusterState) throws IOException;
 
   /**
    * Perform the major balance operation
-   * @param clusterState
    * @return List of plans
    */
-  List<RegionPlan> balanceCluster(Map<ServerName,
-      List<RegionInfo>> clusterState) throws HBaseIOException;
+  List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)
+      throws IOException;
 
   /**
    * Perform a Round Robin assignment of regions.
-   * @param regions
-   * @param servers
    * @return Map of servername to regioninfos
    */
-  Map<ServerName, List<RegionInfo>> roundRobinAssignment(
-    List<RegionInfo> regions,
-    List<ServerName> servers
-  ) throws HBaseIOException;
+  Map<ServerName, List<RegionInfo>> roundRobinAssignment(List<RegionInfo> regions,
+      List<ServerName> servers) throws IOException;
 
   /**
    * Assign regions to the previously hosting region server
-   * @param regions
-   * @param servers
    * @return List of plans
    */
   @Nullable
-  Map<ServerName, List<RegionInfo>> retainAssignment(
-    Map<RegionInfo, ServerName> regions,
-    List<ServerName> servers
-  ) throws HBaseIOException;
+  Map<ServerName, List<RegionInfo>> retainAssignment(Map<RegionInfo, ServerName> regions,
+      List<ServerName> servers) throws IOException;
 
   /**
    * Get a random region server from the list
    * @param regionInfo Region for which this selection is being done.
-   * @param servers
-   * @return Servername
    */
-  ServerName randomAssignment(
-    RegionInfo regionInfo, List<ServerName> servers
-  ) throws HBaseIOException;
+  ServerName randomAssignment(RegionInfo regionInfo, List<ServerName> servers) throws IOException;
 
   /**
    * Initialize the load balancer. Must be called after setters.
-   * @throws HBaseIOException
    */
-  void initialize() throws HBaseIOException;
+  void initialize() throws IOException;
 
   /**
    * Marks the region as online at balancer.
-   * @param regionInfo
-   * @param sn
    */
   void regionOnline(RegionInfo regionInfo, ServerName sn);
 
   /**
    * Marks the region as offline at balancer.
-   * @param regionInfo
    */
   void regionOffline(RegionInfo regionInfo);
 
-  /*
+  /**
    * Notification that config has changed
-   * @param conf
    */
   @Override
   void onConfigurationChange(Configuration conf);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index c318540..debdf68 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -684,7 +684,7 @@ public class AssignmentManager {
         this.master.getServerManager().createDestinationServersList(serversToExclude));
       // Return mid-method!
       return createAssignProcedures(assignments);
-    } catch (HBaseIOException hioe) {
+    } catch (IOException hioe) {
       LOG.warn("Failed roundRobinAssignment", hioe);
     }
     // If an error above, fall-through to this simpler assign. Last resort.
@@ -1999,7 +1999,7 @@ public class AssignmentManager {
       }
       try {
         acceptPlan(regions, balancer.retainAssignment(retainMap, servers));
-      } catch (HBaseIOException e) {
+      } catch (IOException e) {
         LOG.warn("unable to retain assignment", e);
         addToPendingAssignment(regions, retainMap.keySet());
       }
@@ -2014,7 +2014,7 @@ public class AssignmentManager {
       }
       try {
         acceptPlan(regions, balancer.roundRobinAssignment(hris, servers));
-      } catch (HBaseIOException e) {
+      } catch (IOException e) {
         LOG.warn("unable to round-robin assignment", e);
         addToPendingAssignment(regions, hris);
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
index 9ea996b..344d0b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.rsgroup;
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
-
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -36,22 +34,11 @@ public interface RSGroupAdmin {
   RSGroupInfo getRSGroupInfo(String groupName) throws IOException;
 
   /**
-   * Gets {@code RSGroupInfo} for the given table's group.
-   */
-  RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException;
-
-  /**
    * Move given set of servers to the specified target RegionServer group.
    */
   void moveServers(Set<Address> servers, String targetGroup) throws IOException;
 
   /**
-   * Move given set of tables to the specified target RegionServer group.
-   * This will unassign all of a table's region so it can be reassigned to the correct group.
-   */
-  void moveTables(Set<TableName> tables, String targetGroup) throws IOException;
-
-  /**
    * Creates a new RegionServer group with the given name.
    */
   void addRSGroup(String groupName) throws IOException;
@@ -80,16 +67,6 @@ public interface RSGroupAdmin {
   RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException;
 
   /**
-   * Move given set of servers and tables to the specified target RegionServer group.
-   * @param servers set of servers to move
-   * @param tables set of tables to move
-   * @param targetGroup the target group name
-   * @throws IOException if moving the server and tables fail
-   */
-  void moveServersAndTables(Set<Address> servers, Set<TableName> tables,
-                            String targetGroup) throws IOException;
-
-  /**
    * Remove decommissioned servers from rsgroup.
    * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline
    * the server for repairing. Or we need to move some servers to join other clusters.
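Since moveTables and moveServersAndTables are gone from the interface, moving an existing table between groups is expected to go through a table-descriptor modification instead (see moveTablesAndWait in RSGroupAdminServiceImpl below). A minimal sketch, assuming a client-side Admin handle; all names are illustrative.

    // Re-point the table at the target group; the modify-table procedure
    // takes care of re-assigning its regions.
    TableDescriptor current = admin.getDescriptor(tableName);
    TableDescriptor moved = TableDescriptorBuilder.newBuilder(current)
        .setRegionServerGroup("target_group")
        .build();
    admin.modifyTable(moved);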
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
index e7ab7f2..07f0efd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServe
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 
 /**
@@ -62,12 +63,17 @@ public class RSGroupAdminClient implements RSGroupAdmin {
     stub = RSGroupAdminService.newBlockingStub(admin.coprocessorService());
   }
 
+  // for writing UTs
+  @VisibleForTesting
+  protected RSGroupAdminClient() {
+  }
+
   @Override
   public RSGroupInfo getRSGroupInfo(String groupName) throws IOException {
     try {
       GetRSGroupInfoResponse resp = stub.getRSGroupInfo(null,
-          GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build());
-      if(resp.hasRSGroupInfo()) {
+        GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build());
+      if (resp.hasRSGroupInfo()) {
         return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
       }
       return null;
@@ -76,7 +82,6 @@ public class RSGroupAdminClient implements RSGroupAdmin {
     }
   }
 
-  @Override
   public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException {
     GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder().setTableName(
         ProtobufUtil.toProtoTableName(tableName)).build();
@@ -111,7 +116,6 @@ public class RSGroupAdminClient implements RSGroupAdmin {
     }
   }
 
-  @Override
   public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
     MoveTablesRequest.Builder builder = MoveTablesRequest.newBuilder().setTargetGroup(targetGroup);
     for(TableName tableName: tables) {
@@ -192,7 +196,6 @@ public class RSGroupAdminClient implements RSGroupAdmin {
     }
   }
 
-  @Override
   public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup)
       throws IOException {
     MoveServersAndTablesRequest.Builder builder =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 2d5af04..3c4530f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -27,13 +27,10 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
@@ -47,21 +44,16 @@ import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 
 // TODO: Encapsulate MasterObserver functions into separate subclass.
 @CoreCoprocessor
 @InterfaceAudience.Private
 public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
-  static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminEndpoint.class);
-
-  private MasterServices master;
   // Only instance of RSGroupInfoManager. RSGroup aware load balancers ask for this instance on
   // their setup.
+  private MasterServices master;
   private RSGroupInfoManager groupInfoManager;
   private RSGroupAdminServer groupAdminServer;
   private RSGroupAdminServiceImpl groupAdminService = new RSGroupAdminServiceImpl();
@@ -110,117 +102,91 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
     return groupAdminService;
   }
 
-  private void assignTableToGroup(TableDescriptor desc) throws IOException {
-    String groupName =
-        master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
-            .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
-    if (groupName == null) {
-      groupName = RSGroupInfo.DEFAULT_GROUP;
-    }
-    RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
-    if (rsGroupInfo == null) {
-      throw new ConstraintException(
-          "Default RSGroup (" + groupName + ") for this table's namespace does not exist.");
-    }
-    if (!rsGroupInfo.containsTable(desc.getTableName())) {
-      LOG.debug("Pre-moving table " + desc.getTableName() + " to RSGroup " + groupName);
-      groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName);
-    }
-  }
-
   /////////////////////////////////////////////////////////////////////////////
   // MasterObserver overrides
   /////////////////////////////////////////////////////////////////////////////
 
-  private boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException {
-    String groupName;
-    try {
-      groupName = master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
-          .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
-      if (groupName == null) {
-        groupName = RSGroupInfo.DEFAULT_GROUP;
-      }
-    } catch (MasterNotRunningException | PleaseHoldException e) {
-      LOG.info("Master has not initialized yet; temporarily using default RSGroup '" +
-          RSGroupInfo.DEFAULT_GROUP + "' for deploy of system table");
-      groupName = RSGroupInfo.DEFAULT_GROUP;
+  @Override
+  public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      List<ServerName> servers, List<ServerName> notClearedServers) throws IOException {
+    Set<Address> clearedServer =
+        servers.stream().filter(server -> !notClearedServers.contains(server))
+            .map(ServerName::getAddress).collect(Collectors.toSet());
+    if (!clearedServer.isEmpty()) {
+      groupAdminServer.removeServers(clearedServer);
     }
+  }
 
-    RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
-    if (rsGroupInfo == null) {
-      throw new ConstraintException(
-          "Default RSGroup (" + groupName + ") for this table's " + "namespace does not exist.");
+  private void checkGroupExists(Optional<String> optGroupName) throws IOException {
+    if (optGroupName.isPresent()) {
+      String groupName = optGroupName.get();
+      if (groupAdminServer.getRSGroupInfo(groupName) == null) {
+        throw new ConstraintException("Region server group " + groupName + " does not exist");
+      }
     }
+  }
 
-    for (ServerName onlineServer : master.getServerManager().createDestinationServersList()) {
-      if (rsGroupInfo.getServers().contains(onlineServer.getAddress())) {
+  private boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException {
+    RSGroupInfo rsGroupInfo;
+    Optional<String> optGroupName = desc.getRegionServerGroup();
+    if (optGroupName.isPresent()) {
+      String groupName = optGroupName.get();
+      if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
+        // do not check for default group
+        return true;
+      }
+      rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
+      if (rsGroupInfo == null) {
+        throw new ConstraintException(
+            "RSGroup " + groupName + " for table " + desc.getTableName() + " does not exist");
+      }
+    } else {
+      NamespaceDescriptor nd =
+          master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString());
+      String groupNameOfNs = nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
+      if (groupNameOfNs == null || groupNameOfNs.equals(RSGroupInfo.DEFAULT_GROUP)) {
+        // do not check for default group
         return true;
       }
+      rsGroupInfo = groupAdminServer.getRSGroupInfo(groupNameOfNs);
+      if (rsGroupInfo == null) {
+        throw new ConstraintException("RSGroup " + groupNameOfNs + " for table " +
+            desc.getTableName() + " (inherited from namespace) does not exist");
+      }
     }
-    return false;
+    return master.getServerManager().createDestinationServersList().stream()
+        .anyMatch(onlineServer -> rsGroupInfo.containsServer(onlineServer.getAddress()));
   }
 
   @Override
-  public void preCreateTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx,
-      final TableDescriptor desc, final RegionInfo[] regions) throws IOException {
+  public void preCreateTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      TableDescriptor desc, RegionInfo[] regions) throws IOException {
+    checkGroupExists(desc.getRegionServerGroup());
     if (!desc.getTableName().isSystemTable() && !rsgroupHasServersOnline(desc)) {
-      throw new HBaseIOException("No online servers in the rsgroup, which table " +
-          desc.getTableName().getNameAsString() + " belongs to");
+      throw new HBaseIOException("No online servers in the rsgroup for " + desc);
     }
   }
 
-  // Assign table to default RSGroup.
-  @Override
-  public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      TableDescriptor desc, RegionInfo[] regions) throws IOException {
-    assignTableToGroup(desc);
-  }
-
-  // Remove table from its RSGroup.
   @Override
-  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      TableName tableName) throws IOException {
-    try {
-      RSGroupInfo group = groupAdminServer.getRSGroupInfoOfTable(tableName);
-      if (group != null) {
-        LOG.debug(String.format("Removing deleted table '%s' from rsgroup '%s'", tableName,
-          group.getName()));
-        groupAdminServer.moveTables(Sets.newHashSet(tableName), null);
-      }
-    } catch (IOException ex) {
-      LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex);
-    }
+  public TableDescriptor preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor)
+      throws IOException {
+    checkGroupExists(newDescriptor.getRegionServerGroup());
+    return MasterObserver.super.preModifyTable(ctx, tableName, currentDescriptor, newDescriptor);
   }
 
   @Override
   public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
       NamespaceDescriptor ns) throws IOException {
-    String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
-    if (group != null && groupAdminServer.getRSGroupInfo(group) == null) {
-      throw new ConstraintException("Region server group " + group + " does not exit");
-    }
+    checkGroupExists(
+      Optional.ofNullable(ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP)));
   }
 
   @Override
   public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      NamespaceDescriptor currentNsDesc, NamespaceDescriptor newNsDesc) throws IOException {
-    preCreateNamespace(ctx, newNsDesc);
-  }
-
-  @Override
-  public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      SnapshotDescription snapshot, TableDescriptor desc) throws IOException {
-    assignTableToGroup(desc);
-  }
-
-  @Override
-  public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      List<ServerName> servers, List<ServerName> notClearedServers) throws IOException {
-    Set<Address> clearedServer =
-        servers.stream().filter(server -> !notClearedServers.contains(server))
-            .map(ServerName::getAddress).collect(Collectors.toSet());
-    if (!clearedServer.isEmpty()) {
-      groupAdminServer.removeServers(clearedServer);
-    }
+      NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor)
+      throws IOException {
+    checkGroupExists(Optional
+        .ofNullable(newNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP)));
   }
 }
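The hooks above resolve a table's group in a fixed order: the table's own hbase.rsgroup.name property, then the namespace's, then the default group. A small helper sketch (not part of this patch) that mirrors that lookup order:

    // Assumed helper for illustration; mirrors rsgroupHasServersOnline above.
    static String resolveGroupName(TableDescriptor td, NamespaceDescriptor ns) {
      return td.getRegionServerGroup().orElseGet(() -> {
        String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
        return nsGroup != null ? nsGroup : RSGroupInfo.DEFAULT_GROUP;
      });
    }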
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index f3ef4fb..59950e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -26,15 +26,16 @@ import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.function.Function;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.LoadBalancer;
@@ -42,7 +43,6 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -85,14 +85,6 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     return rsGroupInfoManager.getRSGroup(groupName);
   }
 
-  @Override
-  public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException {
-    // We are reading across two Maps in the below with out synchronizing across
-    // them; should be safe most of the time.
-    String groupName = rsGroupInfoManager.getRSGroupOfTable(tableName);
-    return groupName == null? null: rsGroupInfoManager.getRSGroup(groupName);
-  }
-
   private void checkOnlineServersOnly(Set<Address> servers) throws ConstraintException {
     // This ugliness is because we only have Address, not ServerName.
     // Online servers are keyed by ServerName.
@@ -159,104 +151,24 @@ public class RSGroupAdminServer implements RSGroupAdmin {
   }
 
   /**
-   * Check servers and tables.
-   *
-   * @param servers servers to move
-   * @param tables tables to move
-   * @param targetGroupName target group name
-   * @throws IOException if nulls or if servers and tables not belong to the same group
-   */
-  private void checkServersAndTables(Set<Address> servers, Set<TableName> tables,
-                                     String targetGroupName) throws IOException {
-    // Presume first server's source group. Later ensure all servers are from this group.
-    Address firstServer = servers.iterator().next();
-    RSGroupInfo tmpSrcGrp = rsGroupInfoManager.getRSGroupOfServer(firstServer);
-    if (tmpSrcGrp == null) {
-      // Be careful. This exception message is tested for in TestRSGroupsBase...
-      throw new ConstraintException("Source RSGroup for server " + firstServer
-              + " does not exist.");
-    }
-    RSGroupInfo srcGrp = new RSGroupInfo(tmpSrcGrp);
-
-    // Only move online servers
-    checkOnlineServersOnly(servers);
-
-    // Ensure all servers are of same rsgroup.
-    for (Address server: servers) {
-      String tmpGroup = rsGroupInfoManager.getRSGroupOfServer(server).getName();
-      if (!tmpGroup.equals(srcGrp.getName())) {
-        throw new ConstraintException("Move server request should only come from one source " +
-                "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup);
-      }
-    }
-
-    // Ensure all tables and servers are of same rsgroup.
-    for (TableName table : tables) {
-      String tmpGroup = rsGroupInfoManager.getRSGroupOfTable(table);
-      if (!tmpGroup.equals(srcGrp.getName())) {
-        throw new ConstraintException("Move table request should only come from one source " +
-                "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup);
-      }
-    }
-
-    if (srcGrp.getServers().size() <= servers.size() && srcGrp.getTables().size() > tables.size()) {
-      throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() +
-              " that contains tables without servers to host them.");
-    }
-  }
-
-  /**
-   * Move every region from servers which are currently located on these servers,
-   * but should not be located there.
-   *
+   * Move every region which is currently located on one of these servers but should not be
+   * located there.
    * @param servers the servers that will move to new group
    * @param targetGroupName the target group name
    * @throws IOException if moving the server and tables fail
    */
   private void moveServerRegionsFromGroup(Set<Address> servers, String targetGroupName)
     throws IOException {
-    moveRegionsBetweenGroups(servers, targetGroupName,
-      rs -> getRegions(rs),
-      info -> {
-        try {
-          RSGroupInfo group = getRSGroupInfo(targetGroupName);
-          return group.containsTable(info.getTable());
-        } catch (IOException e) {
-          e.printStackTrace();
-          return false;
-        }
-      },
-      rs -> rs.getHostname());
-  }
-
-  /**
-   * Moves regions of tables which are not on target group servers.
-   *
-   * @param tables the tables that will move to new group
-   * @param targetGroupName the target group name
-   * @throws IOException if moving the region fails
-   */
-  private void moveTableRegionsToGroup(Set<TableName> tables, String targetGroupName)
-    throws IOException {
-    moveRegionsBetweenGroups(tables, targetGroupName,
-      table -> {
-        if (master.getAssignmentManager().isTableDisabled(table)) {
-          return new ArrayList<>();
-        }
-        return master.getAssignmentManager().getRegionStates().getRegionsOfTable(table);
-      },
-      info -> {
-        try {
-          RSGroupInfo group = getRSGroupInfo(targetGroupName);
-          ServerName sn =
-              master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(info);
-          return group.containsServer(sn.getAddress());
-        } catch (IOException e) {
-          e.printStackTrace();
-          return false;
-        }
-      },
-      table -> table.getNameWithNamespaceInclAsString());
+    moveRegionsBetweenGroups(servers, targetGroupName, rs -> getRegions(rs), info -> {
+      try {
+        String groupName = RSGroupUtil.getRSGroupInfo(master, rsGroupInfoManager, info.getTable())
+          .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP);
+        return groupName.equals(targetGroupName);
+      } catch (IOException e) {
+        LOG.warn("Failed to test group for region {} and target group {}", info, targetGroupName);
+        return false;
+      }
+    }, rs -> rs.getHostname());
   }
 
   private <T> void moveRegionsBetweenGroups(Set<T> regionsOwners, String targetGroupName,
@@ -321,9 +233,6 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     }
   }
 
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
-      value="RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE",
-      justification="Ignoring complaint because don't know what it is complaining about")
   @Override
   public void moveServers(Set<Address> servers, String targetGroupName) throws IOException {
     if (servers == null) {
@@ -364,9 +273,16 @@ public class RSGroupAdminServer implements RSGroupAdmin {
               "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup);
         }
       }
-      if (srcGrp.getServers().size() <= servers.size() && srcGrp.getTables().size() > 0) {
-        throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() +
-            " that contains tables without servers to host them.");
+      if (srcGrp.getServers().size() <= servers.size()) {
+        // check if there are still tables referencing this group
+        for (TableDescriptor td : master.getTableDescriptors().getAll().values()) {
+          Optional<String> optGroupName = td.getRegionServerGroup();
+          if (optGroupName.isPresent() && optGroupName.get().equals(srcGrp.getName())) {
+            throw new ConstraintException(
+                "Cannot leave a RSGroup " + srcGrp.getName() + " that contains tables('" +
+                    td.getTableName() + "' at least) without servers to host them.");
+          }
+        }
       }
 
       // MovedServers may be < passed in 'servers'.
@@ -378,38 +294,6 @@ public class RSGroupAdminServer implements RSGroupAdmin {
   }
 
   @Override
-  public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
-    if (tables == null) {
-      throw new ConstraintException("The list of servers cannot be null.");
-    }
-    if (tables.size() < 1) {
-      LOG.debug("moveTables() passed an empty set. Ignoring.");
-      return;
-    }
-
-    // Hold a lock on the manager instance while moving servers to prevent
-    // another writer changing our state while we are working.
-    synchronized (rsGroupInfoManager) {
-      if(targetGroup != null) {
-        RSGroupInfo destGroup = rsGroupInfoManager.getRSGroup(targetGroup);
-        if(destGroup == null) {
-          throw new ConstraintException("Target " + targetGroup + " RSGroup does not exist.");
-        }
-        if(destGroup.getServers().size() < 1) {
-          throw new ConstraintException("Target RSGroup must have at least one server.");
-        }
-      }
-      rsGroupInfoManager.moveTables(tables, targetGroup);
-
-      // targetGroup is null when a table is being deleted. In this case no further
-      // action is required.
-      if (targetGroup != null) {
-        moveTableRegionsToGroup(tables, targetGroup);
-      }
-    }
-  }
-
-  @Override
   public void addRSGroup(String name) throws IOException {
     rsGroupInfoManager.addRSGroup(new RSGroupInfo(name));
   }
@@ -423,17 +307,18 @@ public class RSGroupAdminServer implements RSGroupAdmin {
       if (rsGroupInfo == null) {
         throw new ConstraintException("RSGroup " + name + " does not exist");
       }
-      int tableCount = rsGroupInfo.getTables().size();
-      if (tableCount > 0) {
-        throw new ConstraintException("RSGroup " + name + " has " + tableCount +
-            " tables; you must remove these tables from the rsgroup before " +
-            "the rsgroup can be removed.");
-      }
       int serverCount = rsGroupInfo.getServers().size();
       if (serverCount > 0) {
         throw new ConstraintException("RSGroup " + name + " has " + serverCount +
-            " servers; you must remove these servers from the RSGroup before" +
-            "the RSGroup can be removed.");
+          " servers; you must remove these servers from the RSGroup before" +
+          " the RSGroup can be removed.");
+      }
+      for (TableDescriptor td : master.getTableDescriptors().getAll().values()) {
+        if (td.getRegionServerGroup().map(name::equals).orElse(false)) {
+          throw new ConstraintException("RSGroup " + name + " is already referenced by " +
+            td.getTableName() + "; you must remove all the tables from the rsgroup before " +
+            "the rsgroup can be removed.");
+        }
       }
       for (NamespaceDescriptor ns : master.getClusterSchema().getNamespaces()) {
         String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
@@ -458,27 +343,29 @@ public class RSGroupAdminServer implements RSGroupAdmin {
       }
 
       if (getRSGroupInfo(groupName) == null) {
-        throw new ConstraintException("RSGroup does not exist: "+groupName);
+        throw new ConstraintException("RSGroup does not exist: " + groupName);
       }
       // Only allow one balance run at at time.
       Map<String, RegionState> groupRIT = rsGroupGetRegionsInTransition(groupName);
       if (groupRIT.size() > 0) {
         LOG.debug("Not running balancer because {} region(s) in transition: {}", groupRIT.size(),
-            StringUtils.abbreviate(
-              master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(),
-              256));
+          StringUtils.abbreviate(
+            master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(),
+            256));
         return false;
       }
       if (serverManager.areDeadServersInProgress()) {
         LOG.debug("Not running balancer because processing dead regionserver(s): {}",
-            serverManager.getDeadServers());
+          serverManager.getDeadServers());
         return false;
       }
 
-      //We balance per group instead of per table
+      // We balance per group instead of per table
       List<RegionPlan> plans = new ArrayList<>();
-      for(Map.Entry<TableName, Map<ServerName, List<RegionInfo>>> tableMap:
-          getRSGroupAssignmentsByTable(groupName).entrySet()) {
+      Map<TableName, Map<ServerName, List<RegionInfo>>> assignmentsByTable =
+        getRSGroupAssignmentsByTable(groupName);
+      for (Map.Entry<TableName, Map<ServerName, List<RegionInfo>>> tableMap : assignmentsByTable
+        .entrySet()) {
         LOG.info("Creating partial plan for table {} : {}", tableMap.getKey(), tableMap.getValue());
         List<RegionPlan> partialPlans = balancer.balanceCluster(tableMap.getValue());
         LOG.info("Partial plan for table {} : {}", tableMap.getKey(), partialPlans);
@@ -507,100 +394,66 @@ public class RSGroupAdminServer implements RSGroupAdmin {
   }
 
   @Override
-  public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup)
-      throws IOException {
+  public void removeServers(Set<Address> servers) throws IOException {
     if (servers == null || servers.isEmpty()) {
-      throw new ConstraintException("The list of servers to move cannot be null or empty.");
-    }
-    if (tables == null || tables.isEmpty()) {
-      throw new ConstraintException("The list of tables to move cannot be null or empty.");
+      throw new ConstraintException("The set of servers to remove cannot be null or empty.");
     }
-
-    //check target group
-    getAndCheckRSGroupInfo(targetGroup);
-
-    // Hold a lock on the manager instance while moving servers and tables to prevent
+    // Hold a lock on the manager instance while moving servers to prevent
     // another writer changing our state while we are working.
     synchronized (rsGroupInfoManager) {
-      //check servers and tables status
-      checkServersAndTables(servers, tables, targetGroup);
-
-      //Move servers and tables to a new group.
-      String srcGroup = getRSGroupOfServer(servers.iterator().next()).getName();
-      rsGroupInfoManager.moveServersAndTables(servers, tables, srcGroup, targetGroup);
-
-      //move regions on these servers which do not belong to group tables
-      moveServerRegionsFromGroup(servers, targetGroup);
-      //move regions of these tables which are not on group servers
-      moveTableRegionsToGroup(tables, targetGroup);
+      // check the set of servers
+      checkForDeadOrOnlineServers(servers);
+      rsGroupInfoManager.removeServers(servers);
+      LOG.info("Remove decommissioned servers {} from RSGroup done", servers);
     }
-    LOG.info("Move servers and tables done. Severs: {}, Tables: {} => {}", servers, tables,
-        targetGroup);
   }
 
-  @Override
-  public void removeServers(Set<Address> servers) throws IOException {
-    {
-      if (servers == null || servers.isEmpty()) {
-        throw new ConstraintException("The set of servers to remove cannot be null or empty.");
-      }
-      // Hold a lock on the manager instance while moving servers to prevent
-      // another writer changing our state while we are working.
-      synchronized (rsGroupInfoManager) {
-        //check the set of servers
-        checkForDeadOrOnlineServers(servers);
-        rsGroupInfoManager.removeServers(servers);
-        LOG.info("Remove decommissioned servers {} from RSGroup done", servers);
-      }
+  private boolean isTableInGroup(TableName tableName, String groupName,
+    Set<TableName> tablesInGroupCache) throws IOException {
+    if (tablesInGroupCache.contains(tableName)) {
+      return true;
     }
+    if (RSGroupUtil.getRSGroupInfo(master, rsGroupInfoManager, tableName).map(RSGroupInfo::getName)
+      .orElse(RSGroupInfo.DEFAULT_GROUP).equals(groupName)) {
+      tablesInGroupCache.add(tableName);
+      return true;
+    }
+    return false;
   }
 
   private Map<String, RegionState> rsGroupGetRegionsInTransition(String groupName)
-      throws IOException {
+    throws IOException {
     Map<String, RegionState> rit = Maps.newTreeMap();
-    AssignmentManager am = master.getAssignmentManager();
-    for(TableName tableName : getRSGroupInfo(groupName).getTables()) {
-      for(RegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) {
-        RegionState state = am.getRegionStates().getRegionTransitionState(regionInfo);
-        if(state != null) {
-          rit.put(regionInfo.getEncodedName(), state);
-        }
+    Set<TableName> tablesInGroupCache = new HashSet<>();
+    for (RegionStateNode regionNode : master.getAssignmentManager().getRegionsInTransition()) {
+      TableName tn = regionNode.getTable();
+      if (isTableInGroup(tn, groupName, tablesInGroupCache)) {
+        rit.put(regionNode.getRegionInfo().getEncodedName(), regionNode.toRegionState());
       }
     }
     return rit;
   }
 
   private Map<TableName, Map<ServerName, List<RegionInfo>>>
-      getRSGroupAssignmentsByTable(String groupName) throws IOException {
+    getRSGroupAssignmentsByTable(String groupName) throws IOException {
     Map<TableName, Map<ServerName, List<RegionInfo>>> result = Maps.newHashMap();
-    RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName);
-    Map<TableName, Map<ServerName, List<RegionInfo>>> assignments = Maps.newHashMap();
-    for(Map.Entry<RegionInfo, ServerName> entry:
-        master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
-      TableName currTable = entry.getKey().getTable();
-      ServerName currServer = entry.getValue();
-      RegionInfo currRegion = entry.getKey();
-      if (rsGroupInfo.getTables().contains(currTable)) {
-        assignments.putIfAbsent(currTable, new HashMap<>());
-        assignments.get(currTable).putIfAbsent(currServer, new ArrayList<>());
-        assignments.get(currTable).get(currServer).add(currRegion);
+    Set<TableName> tablesInGroupCache = new HashSet<>();
+    for (Map.Entry<RegionInfo, ServerName> entry : master.getAssignmentManager().getRegionStates()
+      .getRegionAssignments().entrySet()) {
+      RegionInfo region = entry.getKey();
+      TableName tn = region.getTable();
+      ServerName server = entry.getValue();
+      if (isTableInGroup(tn, groupName, tablesInGroupCache)) {
+        result.computeIfAbsent(tn, k -> new HashMap<>())
+          .computeIfAbsent(server, k -> new ArrayList<>()).add(region);
       }
     }
-
-    Map<ServerName, List<RegionInfo>> serverMap = Maps.newHashMap();
-    for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) {
-      if(rsGroupInfo.getServers().contains(serverName.getAddress())) {
-        serverMap.put(serverName, Collections.emptyList());
-      }
-    }
-
-    // add all tables that are members of the group
-    for(TableName tableName : rsGroupInfo.getTables()) {
-      if(assignments.containsKey(tableName)) {
-        result.put(tableName, new HashMap<>());
-        result.get(tableName).putAll(serverMap);
-        result.get(tableName).putAll(assignments.get(tableName));
-        LOG.debug("Adding assignments for {}: {}", tableName, assignments.get(tableName));
+    RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName);
+    for (ServerName serverName : master.getServerManager().getOnlineServers().keySet()) {
+      if (rsGroupInfo.containsServer(serverName.getAddress())) {
+        for (Map<ServerName, List<RegionInfo>> map : result.values()) {
+          map.computeIfAbsent(serverName, k -> Collections.emptyList());
+        }
       }
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java
index 918a4fe..6bc4519 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java
@@ -20,14 +20,24 @@ package org.apache.hadoop.hbase.rsgroup;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
@@ -57,6 +67,8 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -68,6 +80,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
  */
 class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
 
+  private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminServiceImpl.class);
+
   private MasterServices master;
 
   private RSGroupAdminServer groupAdminServer;
@@ -107,12 +121,17 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
     return userProvider.getCurrent();
   }
 
+  // for backward compatibility
+  private RSGroupInfo fillTables(RSGroupInfo rsGroupInfo) throws IOException {
+    return RSGroupUtil.fillTables(rsGroupInfo, master.getTableDescriptors().getAll().values());
+  }
+
   @Override
   public void getRSGroupInfo(RpcController controller, GetRSGroupInfoRequest request,
       RpcCallback<GetRSGroupInfoResponse> done) {
     GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder();
     String groupName = request.getRSGroupName();
-    RSGroupAdminEndpoint.LOG.info(
+    LOG.info(
       master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName);
     try {
       if (master.getMasterCoprocessorHost() != null) {
@@ -121,7 +140,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
       checkPermission("getRSGroupInfo");
       RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
       if (rsGroupInfo != null) {
-        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
+        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(rsGroupInfo)));
       }
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
@@ -137,17 +156,24 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
       RpcCallback<GetRSGroupInfoOfTableResponse> done) {
     GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder();
     TableName tableName = ProtobufUtil.toTableName(request.getTableName());
-    RSGroupAdminEndpoint.LOG.info(
+    LOG.info(
       master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName);
     try {
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
       }
       checkPermission("getRSGroupInfoOfTable");
-      RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName);
-      if (RSGroupInfo != null) {
-        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
+      Optional<RSGroupInfo> optGroup =
+          RSGroupUtil.getRSGroupInfo(master, groupAdminServer, tableName);
+      if (optGroup.isPresent()) {
+        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(optGroup.get())));
+      } else {
+        if (master.getTableStateManager().isTablePresent(tableName)) {
+          RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
+          builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(rsGroupInfo)));
+        }
       }
+
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
       }
@@ -165,8 +191,8 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
     for (HBaseProtos.ServerName el : request.getServersList()) {
       hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
     }
-    RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +
-        " to rsgroup " + request.getTargetGroup());
+    LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " +
+        request.getTargetGroup());
     try {
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
@@ -182,6 +208,27 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
     done.run(builder.build());
   }
 
+  private void moveTablesAndWait(Set<TableName> tables, String targetGroup) throws IOException {
+    List<Long> procIds = new ArrayList<Long>();
+    for (TableName tableName : tables) {
+      TableDescriptor oldTd = master.getTableDescriptors().get(tableName);
+      if (oldTd == null) {
+        continue;
+      }
+      TableDescriptor newTd =
+          TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build();
+      procIds.add(master.modifyTable(tableName, newTd, HConstants.NO_NONCE, HConstants.NO_NONCE));
+    }
+    for (long procId : procIds) {
+      Procedure<?> proc = master.getMasterProcedureExecutor().getProcedure(procId);
+      if (proc == null) {
+        continue;
+      }
+      ProcedureSyncWait.waitForProcedureToCompleteIOE(master.getMasterProcedureExecutor(), proc,
+        Long.MAX_VALUE);
+    }
+  }
+
   @Override
   public void moveTables(RpcController controller, MoveTablesRequest request,
       RpcCallback<MoveTablesResponse> done) {
@@ -190,14 +237,14 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
     for (HBaseProtos.TableName tableName : request.getTableNameList()) {
       tables.add(ProtobufUtil.toTableName(tableName));
     }
-    RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +
-        " to rsgroup " + request.getTargetGroup());
+    LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables + " to rsgroup " +
+        request.getTargetGroup());
     try {
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup());
       }
       checkPermission("moveTables");
-      groupAdminServer.moveTables(tables, request.getTargetGroup());
+      moveTablesAndWait(tables, request.getTargetGroup());
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().postMoveTables(tables, request.getTargetGroup());
       }
@@ -211,8 +258,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
   public void addRSGroup(RpcController controller, AddRSGroupRequest request,
       RpcCallback<AddRSGroupResponse> done) {
     AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
-    RSGroupAdminEndpoint.LOG
-        .info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
+    LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
     try {
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
@@ -232,8 +278,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
   public void removeRSGroup(RpcController controller, RemoveRSGroupRequest request,
       RpcCallback<RemoveRSGroupResponse> done) {
     RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder();
-    RSGroupAdminEndpoint.LOG
-        .info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
+    LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
     try {
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
@@ -253,7 +298,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
   public void balanceRSGroup(RpcController controller, BalanceRSGroupRequest request,
       RpcCallback<BalanceRSGroupResponse> done) {
     BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder();
-    RSGroupAdminEndpoint.LOG.info(
+    LOG.info(
       master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName());
     try {
       if (master.getMasterCoprocessorHost() != null) {
@@ -276,14 +321,28 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
   public void listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest request,
       RpcCallback<ListRSGroupInfosResponse> done) {
     ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
-    RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " list rsgroup");
+    LOG.info(master.getClientIdAuditPrefix() + " list rsgroup");
     try {
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preListRSGroups();
       }
       checkPermission("listRSGroup");
-      for (RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) {
-        builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
+      List<RSGroupInfo> rsGroupInfos = groupAdminServer.listRSGroups().stream()
+          .map(RSGroupInfo::new).collect(Collectors.toList());
+      Map<String, RSGroupInfo> name2Info = new HashMap<>();
+      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
+        name2Info.put(rsGroupInfo.getName(), rsGroupInfo);
+      }
+      for (TableDescriptor td : master.getTableDescriptors().getAll().values()) {
+        String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
+        RSGroupInfo rsGroupInfo = name2Info.get(groupName);
+        if (rsGroupInfo != null) {
+          rsGroupInfo.addTable(td.getTableName());
+        }
+      }
+      for (RSGroupInfo rsGroupInfo : rsGroupInfos) {
+        // TODO: this could be done once outside this loop instead of scanning everything every time.
+        builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
       }
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().postListRSGroups();
@@ -300,8 +359,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
     GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder();
     Address hp =
         Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
-    RSGroupAdminEndpoint.LOG
-        .info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
+    LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
     try {
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
@@ -309,7 +367,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
       checkPermission("getRSGroupInfoOfServer");
       RSGroupInfo info = groupAdminServer.getRSGroupOfServer(hp);
       if (info != null) {
-        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(info));
+        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(info)));
       }
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
@@ -332,15 +390,16 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
     for (HBaseProtos.TableName tableName : request.getTableNameList()) {
       tables.add(ProtobufUtil.toTableName(tableName));
     }
-    RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +
-        " and tables " + tables + " to rsgroup" + request.getTargetGroup());
+    LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " and tables " +
+        tables + " to rsgroup" + request.getTargetGroup());
     try {
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables,
           request.getTargetGroup());
       }
       checkPermission("moveServersAndTables");
-      groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup());
+      groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
+      moveTablesAndWait(tables, request.getTargetGroup());
       if (master.getMasterCoprocessorHost() != null) {
         master.getMasterCoprocessorHost().postMoveServersAndTables(hostPorts, tables,
           request.getTargetGroup());
@@ -359,7 +418,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
     for (HBaseProtos.ServerName el : request.getServersList()) {
       servers.add(Address.fromParts(el.getHostName(), el.getPort()));
     }
-    RSGroupAdminEndpoint.LOG.info(
+    LOG.info(
       master.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers);
     try {
       if (master.getMasterCoprocessorHost() != null) {
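The service changes above follow the new model in this branch: a table's rsgroup lives in its TableDescriptor (setRegionServerGroup/getRegionServerGroup) rather than in a separate table map, so moving a table to a group amounts to rewriting its descriptor. A minimal client-side sketch of that flow, assuming a reachable cluster; the table name "my_table" and group name "my_group" are illustrative, and only the setRegionServerGroup/getRegionServerGroup accessors come from this branch, the rest is the standard Admin API:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class MoveTableToGroupSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("my_table"); // illustrative table name
      TableDescriptor current = admin.getDescriptor(tn);
      // Record the target rsgroup in the table descriptor; the admin service and the
      // balancer read it back via TableDescriptor#getRegionServerGroup().
      TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
          .setRegionServerGroup("my_group") // illustrative group name
          .build();
      admin.modifyTable(updated); // region moves follow from the descriptor change
    }
  }
}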
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index be76805..0f943d0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -27,7 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -111,13 +110,13 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
 
   @Override
   public List<RegionPlan> balanceCluster(TableName tableName, Map<ServerName, List<RegionInfo>>
-      clusterState) throws HBaseIOException {
+      clusterState) throws IOException {
     return balanceCluster(clusterState);
   }
 
   @Override
   public List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)
-      throws HBaseIOException {
+      throws IOException {
     if (!isOnline()) {
       throw new ConstraintException(
           RSGroupInfoManager.class.getSimpleName() + " is not online, unable to perform balance");
@@ -168,8 +167,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   @Override
-  public Map<ServerName, List<RegionInfo>> roundRobinAssignment(List<RegionInfo> regions,
-    List<ServerName> servers) throws HBaseIOException {
+  public Map<ServerName, List<RegionInfo>> roundRobinAssignment(
+      List<RegionInfo> regions, List<ServerName> servers) throws IOException {
     Map<ServerName, List<RegionInfo>> assignments = Maps.newHashMap();
     ListMultimap<String, RegionInfo> regionMap = ArrayListMultimap.create();
     ListMultimap<String, ServerName> serverMap = ArrayListMultimap.create();
@@ -198,12 +197,11 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
     try {
       Map<ServerName, List<RegionInfo>> assignments = new TreeMap<>();
       ListMultimap<String, RegionInfo> groupToRegion = ArrayListMultimap.create();
+      RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
       for (RegionInfo region : regions.keySet()) {
-        String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
-        if (groupName == null) {
-          LOG.debug("Group not found for table " + region.getTable() + ", using default");
-          groupName = RSGroupInfo.DEFAULT_GROUP;
-        }
+        String groupName =
+          RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, region.getTable())
+              .orElse(defaultInfo).getName();
         groupToRegion.put(groupName, region);
       }
       for (String key : groupToRegion.keySet()) {
@@ -234,7 +232,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
 
   @Override
   public ServerName randomAssignment(RegionInfo region,
-      List<ServerName> servers) throws HBaseIOException {
+      List<ServerName> servers) throws IOException {
     ListMultimap<String,RegionInfo> regionMap = LinkedListMultimap.create();
     ListMultimap<String,ServerName> serverMap = LinkedListMultimap.create();
     generateGroupMaps(Lists.newArrayList(region), servers, regionMap, serverMap);
@@ -246,12 +244,11 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
     ListMultimap<String, RegionInfo> regionMap, ListMultimap<String, ServerName> serverMap)
     throws HBaseIOException {
     try {
+      RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
       for (RegionInfo region : regions) {
-        String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
-        if (groupName == null) {
-          LOG.debug("Group not found for table " + region.getTable() + ", using default");
-          groupName = RSGroupInfo.DEFAULT_GROUP;
-        }
+        String groupName =
+            RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, region.getTable())
+                .orElse(defaultInfo).getName();
         regionMap.put(groupName, region);
       }
       for (String groupKey : regionMap.keySet()) {
@@ -299,11 +296,11 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   private Pair<Map<ServerName, List<RegionInfo>>, List<RegionPlan>> correctAssignments(
-      Map<ServerName, List<RegionInfo>> existingAssignments) throws HBaseIOException{
+      Map<ServerName, List<RegionInfo>> existingAssignments) throws IOException {
     // To return
     Map<ServerName, List<RegionInfo>> correctAssignments = new TreeMap<>();
     List<RegionPlan> regionPlansForMisplacedRegions = new ArrayList<>();
-
+    RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
     for (Map.Entry<ServerName, List<RegionInfo>> assignments : existingAssignments.entrySet()){
       ServerName currentHostServer = assignments.getKey();
       correctAssignments.put(currentHostServer, new LinkedList<>());
@@ -311,15 +308,11 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
       for (RegionInfo region : regions) {
         RSGroupInfo targetRSGInfo = null;
         try {
-          String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
-          if (groupName == null) {
-            LOG.debug("Group not found for table " + region.getTable() + ", using default");
-            groupName = RSGroupInfo.DEFAULT_GROUP;
-          }
-          targetRSGInfo = rsGroupInfoManager.getRSGroup(groupName);
+          targetRSGInfo =
+              RSGroupUtil.getRSGroupInfo(masterServices, rsGroupInfoManager, region.getTable())
+                  .orElse(defaultInfo);
         } catch (IOException exp) {
-          LOG.debug("RSGroup information null for region of table " + region.getTable(),
-              exp);
+          LOG.debug("RSGroup information null for region of table " + region.getTable(), exp);
         }
         if (targetRSGInfo == null ||
             !targetRSGInfo.containsServer(currentHostServer.getAddress())) { // region is mis-placed
@@ -336,7 +329,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   @Override
-  public void initialize() throws HBaseIOException {
+  public void initialize() throws IOException {
     try {
       if (rsGroupInfoManager == null) {
         List<RSGroupAdminEndpoint> cps =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 70aeabf..28f7c1f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.rsgroup;
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -64,18 +63,6 @@ public interface RSGroupInfoManager {
   RSGroupInfo getRSGroup(String groupName) throws IOException;
 
   /**
-   * Get the group membership of a table
-   */
-  String getRSGroupOfTable(TableName tableName) throws IOException;
-
-  /**
-   * Set the group membership of a set of tables
-   * @param tableNames set of tables to move
-   * @param groupName name of group of tables to move to
-   */
-  void moveTables(Set<TableName> tableNames, String groupName) throws IOException;
-
-  /**
    * List the existing {@code RSGroupInfo}s.
    */
   List<RSGroupInfo> listRSGroups() throws IOException;
@@ -92,16 +79,6 @@ public interface RSGroupInfoManager {
   boolean isOnline();
 
   /**
-   * Move servers and tables to a new group.
-   * @param servers list of servers, must be part of the same group
-   * @param tables set of tables to move
-   * @param srcGroup groupName being moved from
-   * @param dstGroup groupName being moved to
-   */
-  void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String srcGroup,
-      String dstGroup) throws IOException;
-
-  /**
    * Remove decommissioned servers from rsgroup
    * @param servers set of servers to remove
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 8aa7520..37f3ce6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -23,10 +23,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.NavigableSet;
 import java.util.OptionalLong;
 import java.util.Set;
 import java.util.SortedSet;
@@ -143,7 +141,6 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   // These two Maps are immutable and wholesale replaced on each modification
   // so are safe to access concurrently. See class comment.
   private volatile Map<String, RSGroupInfo> rsGroupMap = Collections.emptyMap();
-  private volatile Map<TableName, String> tableMap = Collections.emptyMap();
 
   private final MasterServices masterServices;
   private final AsyncClusterConnection conn;
@@ -261,44 +258,6 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   }
 
   @Override
-  public String getRSGroupOfTable(TableName tableName) {
-    return tableMap.get(tableName);
-  }
-
-  @Override
-  public synchronized void moveTables(Set<TableName> tableNames, String groupName)
-      throws IOException {
-    // Check if rsGroupMap contains the destination rsgroup
-    if (groupName != null && !rsGroupMap.containsKey(groupName)) {
-      throw new DoNotRetryIOException("Group " + groupName + " does not exist");
-    }
-
-    // Make a copy of rsGroupMap to update
-    Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
-
-    // Remove tables from their original rsgroups
-    // and update the copy of rsGroupMap
-    for (TableName tableName : tableNames) {
-      if (tableMap.containsKey(tableName)) {
-        RSGroupInfo src = new RSGroupInfo(newGroupMap.get(tableMap.get(tableName)));
-        src.removeTable(tableName);
-        newGroupMap.put(src.getName(), src);
-      }
-    }
-
-    // Add tables to the destination rsgroup
-    // and update the copy of rsGroupMap
-    if (groupName != null) {
-      RSGroupInfo dstGroup = new RSGroupInfo(newGroupMap.get(groupName));
-      dstGroup.addAllTables(tableNames);
-      newGroupMap.put(dstGroup.getName(), dstGroup);
-    }
-
-    // Flush according to the updated copy of rsGroupMap
-    flushConfig(newGroupMap);
-  }
-
-  @Override
   public synchronized void removeRSGroup(String groupName) throws IOException {
     if (!rsGroupMap.containsKey(groupName) || groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
       throw new DoNotRetryIOException(
@@ -311,7 +270,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
 
   @Override
   public List<RSGroupInfo> listRSGroups() {
-    return Lists.newLinkedList(rsGroupMap.values());
+    return Lists.newArrayList(rsGroupMap.values());
   }
 
   @Override
@@ -320,31 +279,6 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   }
 
   @Override
-  public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String srcGroup,
-      String dstGroup) throws IOException {
-    // get server's group
-    RSGroupInfo srcGroupInfo = getRSGroupInfo(srcGroup);
-    RSGroupInfo dstGroupInfo = getRSGroupInfo(dstGroup);
-
-    // move servers
-    for (Address el : servers) {
-      srcGroupInfo.removeServer(el);
-      dstGroupInfo.addServer(el);
-    }
-    // move tables
-    for (TableName tableName : tables) {
-      srcGroupInfo.removeTable(tableName);
-      dstGroupInfo.addTable(tableName);
-    }
-
-    // flush changed groupinfo
-    Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
-    newGroupMap.put(srcGroupInfo.getName(), srcGroupInfo);
-    newGroupMap.put(dstGroupInfo.getName(), dstGroupInfo);
-    flushConfig(newGroupMap);
-  }
-
-  @Override
   public synchronized void removeServers(Set<Address> servers) throws IOException {
     Map<String, RSGroupInfo> rsGroupInfos = new HashMap<String, RSGroupInfo>();
     for (Address el : servers) {
@@ -425,7 +359,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
    * startup of the manager.
    */
   private synchronized void refresh(boolean forceOnline) throws IOException {
-    List<RSGroupInfo> groupList = new LinkedList<>();
+    List<RSGroupInfo> groupList = new ArrayList<>();
 
     // Overwrite anything read from zk, group table is source of truth
     // if online read from GROUP table
@@ -437,37 +371,20 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       groupList.addAll(retrieveGroupListFromZookeeper());
     }
 
-    // refresh default group, prune
-    NavigableSet<TableName> orphanTables = new TreeSet<>();
-    for (String entry : masterServices.getTableDescriptors().getAll().keySet()) {
-      orphanTables.add(TableName.valueOf(entry));
-    }
-    for (RSGroupInfo group : groupList) {
-      if (!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
-        orphanTables.removeAll(group.getTables());
-      }
-    }
-
     // This is added at the end of the list so it overwrites the 'default' rsgroup loaded
     // from the region group table or zk
-    groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers(), orphanTables));
+    groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers()));
 
     // populate the data
     HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap();
-    HashMap<TableName, String> newTableMap = Maps.newHashMap();
     for (RSGroupInfo group : groupList) {
       newGroupMap.put(group.getName(), group);
-      for (TableName table : group.getTables()) {
-        newTableMap.put(table, group.getName());
-      }
     }
-    resetRSGroupAndTableMaps(newGroupMap, newTableMap);
+    resetRSGroupMap(newGroupMap);
     updateCacheOfRSGroups(rsGroupMap.keySet());
   }
 
-  private synchronized Map<TableName, String> flushConfigTable(Map<String, RSGroupInfo> groupMap)
-      throws IOException {
-    Map<TableName, String> newTableMap = Maps.newHashMap();
+  private void flushConfigTable(Map<String, RSGroupInfo> groupMap) throws IOException {
     List<Mutation> mutations = Lists.newArrayList();
 
     // populate deletes
@@ -484,15 +401,11 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       Put p = new Put(Bytes.toBytes(RSGroupInfo.getName()));
       p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
       mutations.add(p);
-      for (TableName entry : RSGroupInfo.getTables()) {
-        newTableMap.put(entry, RSGroupInfo.getName());
-      }
     }
 
     if (mutations.size() > 0) {
       multiMutate(mutations);
     }
-    return newTableMap;
   }
 
   private synchronized void flushConfig() throws IOException {
@@ -500,8 +413,6 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   }
 
   private synchronized void flushConfig(Map<String, RSGroupInfo> newGroupMap) throws IOException {
-    Map<TableName, String> newTableMap;
-
     // For offline mode persistence is still unavailable
     // We're refreshing in-memory state but only for servers in default group
     if (!isOnline()) {
@@ -516,7 +427,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP);
       if (!oldGroupMap.equals(newGroupMap) /* compare both tables and servers in other groups */ ||
           !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables())
-          /* compare tables in default group */) {
+      /* compare tables in default group */) {
         throw new IOException("Only servers in default group can be updated during offline mode");
       }
 
@@ -533,11 +444,11 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       return;
     }
 
-    /* For online mode, persist to Zookeeper */
-    newTableMap = flushConfigTable(newGroupMap);
+    /* For online mode, persist to hbase:rsgroup and Zookeeper */
+    flushConfigTable(newGroupMap);
 
     // Make changes visible after having been persisted to the source of truth
-    resetRSGroupAndTableMaps(newGroupMap, newTableMap);
+    resetRSGroupMap(newGroupMap);
 
     try {
       String groupBasePath =
@@ -575,11 +486,9 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   /**
    * Make changes visible. Caller must be synchronized on 'this'.
    */
-  private void resetRSGroupAndTableMaps(Map<String, RSGroupInfo> newRSGroupMap,
-      Map<TableName, String> newTableMap) {
+  private void resetRSGroupMap(Map<String, RSGroupInfo> newRSGroupMap) {
     // Make maps Immutable.
     this.rsGroupMap = Collections.unmodifiableMap(newRSGroupMap);
-    this.tableMap = Collections.unmodifiableMap(newTableMap);
   }
 
   /**
@@ -597,7 +506,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
       return masterServices.getServerManager().getOnlineServersList();
     }
     LOG.debug("Reading online RS from zookeeper");
-    List<ServerName> servers = new LinkedList<>();
+    List<ServerName> servers = new ArrayList<>();
     try {
       for (String el : ZKUtil.listChildrenNoWatch(watcher, watcher.getZNodePaths().rsZNode)) {
         servers.add(ServerName.parseServerName(el));
@@ -633,7 +542,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   // the rsGroupMap then writing it out.
   private synchronized void updateDefaultServers(SortedSet<Address> servers) throws IOException {
     RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP);
-    RSGroupInfo newInfo = new RSGroupInfo(info.getName(), servers, info.getTables());
+    RSGroupInfo newInfo = new RSGroupInfo(info.getName(), servers);
     HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
     newGroupMap.put(newInfo.getName(), newInfo);
     flushConfig(newGroupMap);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java
new file mode 100644
index 0000000..a08d236
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Optional;
+import java.util.function.Predicate;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.ClusterSchema;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Helper class for RSGroup implementation
+ */
+@InterfaceAudience.Private
+final class RSGroupUtil {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RSGroupUtil.class);
+
+  private RSGroupUtil() {
+  }
+
+  @FunctionalInterface
+  private interface GetRSGroup {
+    RSGroupInfo get(String groupName) throws IOException;
+  }
+
+  private static Optional<RSGroupInfo> getRSGroupInfo(MasterServices master, GetRSGroup getter,
+      TableName tableName) throws IOException {
+    TableDescriptor td = master.getTableDescriptors().get(tableName);
+    if (td == null) {
+      return Optional.empty();
+    }
+    Optional<String> optGroupNameOfTable = td.getRegionServerGroup();
+    if (optGroupNameOfTable.isPresent()) {
+      RSGroupInfo group = getter.get(optGroupNameOfTable.get());
+      if (group != null) {
+        return Optional.of(group);
+      }
+    }
+    ClusterSchema clusterSchema = master.getClusterSchema();
+    if (clusterSchema == null) {
+      if (TableName.isMetaTableName(tableName)) {
+        LOG.info("Can not get the namespace rs group config for meta table, since the" +
+            " meta table is not online yet, will use default group to assign meta first");
+      } else {
+        LOG.warn("ClusterSchema is null, can only use default rsgroup, should not happen?");
+      }
+      return Optional.empty();
+    }
+    NamespaceDescriptor nd = clusterSchema.getNamespace(tableName.getNamespaceAsString());
+    String groupNameOfNs = nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
+    if (groupNameOfNs == null) {
+      return Optional.empty();
+    }
+    return Optional.ofNullable(getter.get(groupNameOfNs));
+  }
+
+  /**
+   * Will try to get the rsgroup from {@link TableDescriptor} first, and then try to get the rsgroup
+   * from the {@link NamespaceDescriptor}. If still not present, return empty.
+   */
+  static Optional<RSGroupInfo> getRSGroupInfo(MasterServices master, RSGroupInfoManager manager,
+      TableName tableName) throws IOException {
+    return getRSGroupInfo(master, manager::getRSGroup, tableName);
+  }
+
+  /**
+   * Will try to get the rsgroup from {@link TableDescriptor} first, and then try to get the rsgroup
+   * from the {@link NamespaceDescriptor}. If still not present, return empty.
+   */
+  static Optional<RSGroupInfo> getRSGroupInfo(MasterServices master, RSGroupAdmin admin,
+      TableName tableName) throws IOException {
+    return getRSGroupInfo(master, admin::getRSGroupInfo, tableName);
+  }
+
+  /**
+   * Fill the tables field for {@link RSGroupInfo}, for backward compatibility.
+   */
+  @SuppressWarnings("deprecation")
+  static RSGroupInfo fillTables(RSGroupInfo rsGroupInfo, Collection<TableDescriptor> tds) {
+    RSGroupInfo newRsGroupInfo = new RSGroupInfo(rsGroupInfo);
+    Predicate<TableDescriptor> filter;
+    if (rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
+      filter = td -> {
+        Optional<String> optGroupName = td.getRegionServerGroup();
+        return !optGroupName.isPresent() || optGroupName.get().equals(RSGroupInfo.DEFAULT_GROUP);
+      };
+    } else {
+      filter = td -> {
+        Optional<String> optGroupName = td.getRegionServerGroup();
+        return optGroupName.isPresent() && optGroupName.get().equals(newRsGroupInfo.getName());
+      };
+    }
+    tds.stream().filter(filter).map(TableDescriptor::getTableName)
+        .forEach(newRsGroupInfo::addTable);
+    return newRsGroupInfo;
+  }
+}
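A short usage sketch for the new helper. RSGroupUtil is package-private, so callers sit in org.apache.hadoop.hbase.rsgroup, as RSGroupBasedLoadBalancer does above; the example class and method names below are illustrative and not part of the patch:

package org.apache.hadoop.hbase.rsgroup;

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterServices;

final class ResolveGroupExample {

  private ResolveGroupExample() {
  }

  // Mirrors what RSGroupBasedLoadBalancer does per region: take the group from the
  // table descriptor if set, otherwise from the namespace, otherwise fall back to default.
  static RSGroupInfo resolve(MasterServices master, RSGroupInfoManager manager,
      TableName tableName) throws IOException {
    RSGroupInfo defaultInfo = manager.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
    return RSGroupUtil.getRSGroupInfo(master, manager, tableName).orElse(defaultInfo);
  }
}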
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java
index 6dc3711..47337f9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -26,7 +27,6 @@ import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
@@ -82,7 +82,7 @@ public class TestRegionPlacement2 {
   }
 
   @Test
-  public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException {
+  public void testFavoredNodesPresentForRoundRobinAssignment() throws IOException {
     LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
     balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
     balancer.initialize();
@@ -143,7 +143,7 @@ public class TestRegionPlacement2 {
   }
 
   @Test
-  public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException {
+  public void testFavoredNodesPresentForRandomAssignment() throws IOException {
     LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
     balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
     balancer.initialize();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java
index 570bb3a..4c00bcf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java
@@ -28,6 +28,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
@@ -60,17 +61,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 public class RSGroupableBalancerTestBase {
 
   static SecureRandom rand = new SecureRandom();
-  static String[] groups = new String[] {RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4"};
+  static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" };
   static TableName table0 = TableName.valueOf("dt0");
-  static TableName[] tables =
-      new TableName[] { TableName.valueOf("dt1"),
-          TableName.valueOf("dt2"),
-          TableName.valueOf("dt3"),
-          TableName.valueOf("dt4")};
+  static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"),
+      TableName.valueOf("dt3"), TableName.valueOf("dt4") };
   static List<ServerName> servers;
   static Map<String, RSGroupInfo> groupMap;
-  static Map<TableName, String> tableMap = new HashMap<>();
-  static List<TableDescriptor> tableDescs;
+  static Map<TableName, TableDescriptor> tableDescs;
   int[] regionAssignment = new int[] { 2, 5, 7, 10, 4, 3, 1 };
   static int regionId = 0;
 
@@ -113,20 +110,19 @@ public class RSGroupableBalancerTestBase {
   /**
    * All regions have an assignment.
    */
-  protected void assertImmediateAssignment(List<RegionInfo> regions,
-                                         List<ServerName> servers,
-                                         Map<RegionInfo, ServerName> assignments)
-      throws IOException {
+  protected void assertImmediateAssignment(List<RegionInfo> regions, List<ServerName> servers,
+      Map<RegionInfo, ServerName> assignments) throws IOException {
     for (RegionInfo region : regions) {
       assertTrue(assignments.containsKey(region));
       ServerName server = assignments.get(region);
       TableName tableName = region.getTable();
 
-      String groupName = getMockedGroupInfoManager().getRSGroupOfTable(tableName);
+      String groupName =
+          tableDescs.get(tableName).getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
       assertTrue(StringUtils.isNotEmpty(groupName));
       RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName);
       assertTrue("Region is not correctly assigned to group servers.",
-          gInfo.containsServer(server.getAddress()));
+        gInfo.containsServer(server.getAddress()));
     }
   }
 
@@ -169,16 +165,13 @@ public class RSGroupableBalancerTestBase {
         ServerName oldAssignedServer = existing.get(r);
         TableName tableName = r.getTable();
         String groupName =
-            getMockedGroupInfoManager().getRSGroupOfTable(tableName);
+            tableDescs.get(tableName).getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
         assertTrue(StringUtils.isNotEmpty(groupName));
-        RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(
-            groupName);
-        assertTrue(
-            "Region is not correctly assigned to group servers.",
-            gInfo.containsServer(currentServer.getAddress()));
-        if (oldAssignedServer != null
-            && onlineHostNames.contains(oldAssignedServer
-            .getHostname())) {
+        RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName);
+        assertTrue("Region is not correctly assigned to group servers.",
+          gInfo.containsServer(currentServer.getAddress()));
+        if (oldAssignedServer != null &&
+            onlineHostNames.contains(oldAssignedServer.getHostname())) {
           // this region was previously assigned somewhere, and that
           // host is still around, so the host must have been in a
           // different group.
@@ -358,13 +351,12 @@ public class RSGroupableBalancerTestBase {
 
   /**
    * Construct group info, with each group having at least one server.
-   *
    * @param servers the servers
    * @param groups the groups
    * @return the map
    */
-  protected static Map<String, RSGroupInfo> constructGroupInfo(
-      List<ServerName> servers, String[] groups) {
+  protected static Map<String, RSGroupInfo> constructGroupInfo(List<ServerName> servers,
+      String[] groups) {
     assertTrue(servers != null);
     assertTrue(servers.size() >= groups.length);
     int index = 0;
@@ -377,8 +369,7 @@ public class RSGroupableBalancerTestBase {
     }
     while (index < servers.size()) {
       int grpIndex = rand.nextInt(groups.length);
-      groupMap.get(groups[grpIndex]).addServer(
-          servers.get(index).getAddress());
+      groupMap.get(groups[grpIndex]).addServer(servers.get(index).getAddress());
       index++;
     }
     return groupMap;
@@ -389,29 +380,28 @@ public class RSGroupableBalancerTestBase {
    * @param hasBogusTable there is a table that does not determine the group
    * @return the list of table descriptors
    */
-  protected static List<TableDescriptor> constructTableDesc(boolean hasBogusTable) {
-    List<TableDescriptor> tds = Lists.newArrayList();
+  protected static Map<TableName, TableDescriptor> constructTableDesc(boolean hasBogusTable) {
+    Map<TableName, TableDescriptor> tds = new HashMap<>();
     int index = rand.nextInt(groups.length);
     for (int i = 0; i < tables.length; i++) {
-      TableDescriptor htd = TableDescriptorBuilder.newBuilder(tables[i]).build();
       int grpIndex = (i + index) % groups.length;
       String groupName = groups[grpIndex];
-      tableMap.put(tables[i], groupName);
-      tds.add(htd);
+      TableDescriptor htd =
+          TableDescriptorBuilder.newBuilder(tables[i]).setRegionServerGroup(groupName).build();
+      tds.put(htd.getTableName(), htd);
     }
     if (hasBogusTable) {
-      tableMap.put(table0, "");
-      tds.add(TableDescriptorBuilder.newBuilder(table0).build());
+      tds.put(table0, TableDescriptorBuilder.newBuilder(table0).setRegionServerGroup("").build());
     }
     return tds;
   }
 
   protected static MasterServices getMockedMaster() throws IOException {
     TableDescriptors tds = Mockito.mock(TableDescriptors.class);
-    Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0));
-    Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1));
-    Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2));
-    Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3));
+    Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(tables[0]));
+    Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(tables[1]));
+    Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(tables[2]));
+    Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(tables[3]));
     MasterServices services = Mockito.mock(HMaster.class);
     Mockito.when(services.getTableDescriptors()).thenReturn(tds);
     AssignmentManager am = Mockito.mock(AssignmentManager.class);
@@ -430,13 +420,6 @@ public class RSGroupableBalancerTestBase {
     Mockito.when(gm.listRSGroups()).thenReturn(
         Lists.newLinkedList(groupMap.values()));
     Mockito.when(gm.isOnline()).thenReturn(true);
-    Mockito.when(gm.getRSGroupOfTable(Mockito.any()))
-        .thenAnswer(new Answer<String>() {
-          @Override
-          public String answer(InvocationOnMock invocation) throws Throwable {
-            return tableMap.get(invocation.getArgument(0));
-          }
-        });
     return gm;
   }
 
@@ -444,15 +427,16 @@ public class RSGroupableBalancerTestBase {
     TableName tableName = null;
     RSGroupInfoManager gm = getMockedGroupInfoManager();
     RSGroupInfo groupOfServer = null;
-    for(RSGroupInfo gInfo : gm.listRSGroups()){
-      if(gInfo.containsServer(sn.getAddress())){
+    for (RSGroupInfo gInfo : gm.listRSGroups()) {
+      if (gInfo.containsServer(sn.getAddress())) {
         groupOfServer = gInfo;
         break;
       }
     }
 
-    for(TableDescriptor desc : tableDescs){
-      if(gm.getRSGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())){
+    for (TableDescriptor desc : tableDescs.values()) {
+      Optional<String> optGroupName = desc.getRegionServerGroup();
+      if (optGroupName.isPresent() && optGroupName.get().endsWith(groupOfServer.getName())) {
         tableName = desc.getTableName();
       }
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
index 4b88201..2345a39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
@@ -96,33 +96,30 @@ public class TestRSGroupBasedLoadBalancer extends RSGroupableBalancerTestBase {
 
   /**
    * Tests the bulk assignment used during cluster startup.
-   *
-   * Round-robin. Should yield a balanced cluster so same invariant as the
-   * load balancer holds, all servers holding either floor(avg) or
-   * ceiling(avg).
+   * <p/>
+   * Round-robin. Should yield a balanced cluster so same invariant as the load balancer holds, all
+   * servers holding either floor(avg) or ceiling(avg).
    */
   @Test
   public void testBulkAssignment() throws Exception {
     List<RegionInfo> regions = randomRegions(25);
-    Map<ServerName, List<RegionInfo>> assignments = loadBalancer
-        .roundRobinAssignment(regions, servers);
-    //test empty region/servers scenario
-    //this should not throw an NPE
+    Map<ServerName, List<RegionInfo>> assignments =
+        loadBalancer.roundRobinAssignment(regions, servers);
+    // test empty region/servers scenario
+    // this should not throw an NPE
     loadBalancer.roundRobinAssignment(regions, Collections.emptyList());
-    //test regular scenario
+    // test regular scenario
     assertTrue(assignments.keySet().size() == servers.size());
     for (ServerName sn : assignments.keySet()) {
       List<RegionInfo> regionAssigned = assignments.get(sn);
       for (RegionInfo region : regionAssigned) {
         TableName tableName = region.getTable();
         String groupName =
-            getMockedGroupInfoManager().getRSGroupOfTable(tableName);
+            tableDescs.get(tableName).getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
         assertTrue(StringUtils.isNotEmpty(groupName));
-        RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(
-            groupName);
-        assertTrue(
-            "Region is not correctly assigned to group servers.",
-            gInfo.containsServer(sn.getAddress()));
+        RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName);
+        assertTrue("Region is not correctly assigned to group servers.",
+          gInfo.containsServer(sn.getAddress()));
       }
     }
     ArrayListMultimap<String, ServerAndLoad> loadMap = convertToGroupBasedMap(assignments);
@@ -158,24 +155,25 @@ public class TestRSGroupBasedLoadBalancer extends RSGroupableBalancerTestBase {
     onlineServers.addAll(servers);
     List<RegionInfo> regions = randomRegions(25);
     int bogusRegion = 0;
-    for(RegionInfo region : regions){
-      String group = tableMap.get(region.getTable());
-      if("dg3".equals(group) || "dg4".equals(group)){
+    for (RegionInfo region : regions) {
+      String group = tableDescs.get(region.getTable()).getRegionServerGroup()
+          .orElse(RSGroupInfo.DEFAULT_GROUP);
+      if ("dg3".equals(group) || "dg4".equals(group)) {
         bogusRegion++;
       }
     }
     Set<Address> offlineServers = new HashSet<Address>();
     offlineServers.addAll(groupMap.get("dg3").getServers());
     offlineServers.addAll(groupMap.get("dg4").getServers());
-    for(Iterator<ServerName> it =  onlineServers.iterator(); it.hasNext();){
+    for (Iterator<ServerName> it = onlineServers.iterator(); it.hasNext();) {
       ServerName server = it.next();
       Address address = server.getAddress();
-      if(offlineServers.contains(address)){
+      if (offlineServers.contains(address)) {
         it.remove();
       }
     }
-    Map<ServerName, List<RegionInfo>> assignments = loadBalancer
-        .roundRobinAssignment(regions, onlineServers);
+    Map<ServerName, List<RegionInfo>> assignments =
+        loadBalancer.roundRobinAssignment(regions, onlineServers);
     assertEquals(bogusRegion, assignments.get(LoadBalancer.BOGUS_SERVER_NAME).size());
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
index e588a7e..a4ae636 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -32,7 +33,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
@@ -98,7 +98,7 @@ public class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal
    * Test HBASE-20791
    */
   @Test
-  public void testBalanceCluster() throws HBaseIOException {
+  public void testBalanceCluster() throws IOException {
     // mock cluster State
     Map<ServerName, List<RegionInfo>> clusterState = new HashMap<ServerName, List<RegionInfo>>();
     ServerName serverA = servers.get(0);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
index 27511e3..7471458 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
@@ -29,7 +29,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.SortedSet;
-
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
index 4b4097f..076362e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
@@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.rsgroup.RSGroupAdminServer.DEFAULT_MAX_RET
 import static org.apache.hadoop.hbase.util.Threads.sleep;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -408,7 +407,9 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
     assertTrue(newGroupTables.contains(tableName));
 
     // verify that all regions are still assigned on targetServer
-    Assert.assertEquals(5, getTableServerRegionMap().get(tableName).get(targetServer).size());
+    // TODO: uncomment after we reimplement moveServersAndTables; the current implementation
+    // moves servers first and then tables, so regions may be moved to other region servers.
+    // Assert.assertEquals(5, getTableServerRegionMap().get(tableName).get(targetServer).size());
 
     assertTrue(observer.preMoveServersAndTables);
     assertTrue(observer.postMoveServersAndTables);
@@ -503,61 +504,6 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
     });
   }
 
-  @Test
-  public void testFailedMoveBeforeRetryExhaustedWhenMoveTable() throws Exception {
-    final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 1);
-    Pair<ServerName, RegionStateNode> gotPair = createTableWithRegionSplitting(newGroup,
-        5);
-
-    // move table to group
-    Thread t2 = new Thread(() -> {
-      LOG.info("thread2 start running, to move regions");
-      try {
-        rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName());
-      } catch (IOException e) {
-        LOG.error("move server error", e);
-      }
-    });
-    t2.start();
-
-    // start thread to recover region state
-    final ServerName ss = gotPair.getFirst();
-    final RegionStateNode rsn = gotPair.getSecond();
-    AtomicBoolean changed = new AtomicBoolean(false);
-
-    Thread t1 = recoverRegionStateThread(ss, server -> {
-      List<RegionInfo> regions = master.getAssignmentManager().getRegionsOnServer(ss);
-      List<RegionInfo> tableRegions = new ArrayList<>();
-      for (RegionInfo regionInfo : regions) {
-        if (regionInfo.getTable().equals(tableName)) {
-          tableRegions.add(regionInfo);
-        }
-      }
-      return tableRegions;
-    }, rsn, changed);
-    t1.start();
-
-    t1.join();
-    t2.join();
-
-    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() {
-        if (changed.get()) {
-          boolean serverHasTableRegions = false;
-          for (RegionInfo regionInfo : master.getAssignmentManager().getRegionsOnServer(ss)) {
-            if (regionInfo.getTable().equals(tableName)) {
-              serverHasTableRegions = true;
-              break;
-            }
-          }
-          return !serverHasTableRegions && !rsn.getRegionLocation().equals(ss);
-        }
-        return false;
-      }
-    });
-  }
-
   private <T> Thread recoverRegionStateThread(T owner, Function<T, List<RegionInfo>> getRegions,
       RegionStateNode rsn, AtomicBoolean changed){
     return new Thread(() -> {
@@ -653,50 +599,6 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
   }
 
   @Test
-  public void testFailedMoveTablesAndRepair() throws Exception{
-    // This UT calls moveTables() twice to test the idempotency of it.
-    // The first time, movement fails because a region is made in SPLITTING state
-    // which will not be moved.
-    // The second time, the region state is OPEN and check if all
-    // regions on target group servers after the call.
-    final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 1);
-    Iterator iterator = newGroup.getServers().iterator();
-    Address newGroupServer1 = (Address) iterator.next();
-
-    // create table
-    // randomly set a region state to SPLITTING to make move abort
-    Pair<ServerName, RegionStateNode> gotPair = createTableWithRegionSplitting(newGroup,
-        new Random().nextInt(8) + 4);
-    RegionStateNode rsn = gotPair.getSecond();
-
-    // move table to newGroup and check regions
-    try {
-      rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName());
-      fail("should get IOException when retry exhausted but there still exists failed moved "
-          + "regions");
-    }catch (Exception e){
-      assertTrue(e.getMessage().contains(
-          gotPair.getSecond().getRegionInfo().getRegionNameAsString()));
-    }
-    for(RegionInfo regionInfo : master.getAssignmentManager().getAssignedRegions()){
-      if (regionInfo.getTable().equals(tableName) && regionInfo.equals(rsn.getRegionInfo())) {
-        assertNotEquals(master.getAssignmentManager().getRegionStates()
-            .getRegionServerOfRegion(regionInfo).getAddress(), newGroupServer1);
-      }
-    }
-
-    // retry move table to newGroup and check if all regions are corrected
-    rsn.setState(RegionState.State.OPEN);
-    rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName());
-    for(RegionInfo regionInfo : master.getAssignmentManager().getAssignedRegions()){
-      if (regionInfo.getTable().equals(tableName)) {
-        assertEquals(master.getAssignmentManager().getRegionStates()
-            .getRegionServerOfRegion(regionInfo).getAddress(), newGroupServer1);
-      }
-    }
-  }
-
-  @Test
   public void testFailedMoveServersAndRepair() throws Exception{
     // This UT calls moveServers() twice to test the idempotency of it.
     // The first time, movement fails because a region is made in SPLITTING state
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java
index 67f5c7e..8d10850 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java
@@ -45,8 +45,6 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-
 @Category({ MediumTests.class })
 public class TestRSGroupsBalance extends TestRSGroupsBase {
 
@@ -153,19 +151,21 @@ public class TestRSGroupsBalance extends TestRSGroupsBase {
 
   @Test
   public void testMisplacedRegions() throws Exception {
-    final TableName tableName = TableName.valueOf(tablePrefix + "_testMisplacedRegions");
-    LOG.info("testMisplacedRegions");
+    String namespace = tablePrefix + "_" + name.getMethodName();
+    TEST_UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build());
+    final TableName tableName =
+        TableName.valueOf(namespace, tablePrefix + "_" + name.getMethodName());
+    LOG.info(name.getMethodName());
 
-    final RSGroupInfo RSGroupInfo = addGroup("testMisplacedRegions", 1);
+    final RSGroupInfo rsGroupInfo = addGroup(name.getMethodName(), 1);
 
     TEST_UTIL.createMultiRegionTable(tableName, new byte[] { 'f' }, 15);
     TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
-
-    rsGroupAdminEndpoint.getGroupInfoManager().moveTables(Sets.newHashSet(tableName),
-      RSGroupInfo.getName());
+    TEST_UTIL.getAdmin().modifyNamespace(NamespaceDescriptor.create(namespace)
+        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, rsGroupInfo.getName()).build());
 
     admin.balancerSwitch(true, true);
-    assertTrue(rsGroupAdmin.balanceRSGroup(RSGroupInfo.getName()));
+    assertTrue(rsGroupAdmin.balanceRSGroup(rsGroupInfo.getName()));
     admin.balancerSwitch(false, true);
     assertTrue(observer.preBalanceRSGroupCalled);
     assertTrue(observer.postBalanceRSGroupCalled);
@@ -174,7 +174,7 @@ public class TestRSGroupsBalance extends TestRSGroupsBase {
       @Override
       public boolean evaluate() throws Exception {
         ServerName serverName =
-          ServerName.valueOf(RSGroupInfo.getServers().iterator().next().toString(), 1);
+            ServerName.valueOf(rsGroupInfo.getServers().iterator().next().toString(), 1);
         return admin.getConnection().getAdmin().getRegions(serverName).size() == 15;
       }
     });
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 0e4fb34..b91cd5e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -74,7 +74,7 @@ public abstract class TestRSGroupsBase {
   protected static HBaseTestingUtility TEST_UTIL;
   protected static Admin admin;
   protected static HBaseCluster cluster;
-  protected static RSGroupAdmin rsGroupAdmin;
+  protected static RSGroupAdminClient rsGroupAdmin;
   protected static HMaster master;
   protected boolean INIT = false;
   protected static RSGroupAdminEndpoint rsGroupAdminEndpoint;
@@ -188,8 +188,8 @@ public abstract class TestRSGroupsBase {
     RSGroupInfo defaultInfo = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
     rsGroupAdmin.addRSGroup(groupName);
     Set<Address> set = new HashSet<>();
-    for(Address server: defaultInfo.getServers()) {
-      if(set.size() == serverCount) {
+    for (Address server : defaultInfo.getServers()) {
+      if (set.size() == serverCount) {
         break;
       }
       set.add(server);
@@ -222,7 +222,7 @@ public abstract class TestRSGroupsBase {
   }
 
   protected void deleteGroups() throws IOException {
-    RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection());
+    RSGroupAdminClient groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection());
     for(RSGroupInfo group: groupAdmin.listRSGroups()) {
       if(!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
         groupAdmin.moveTables(group.getTables(), RSGroupInfo.DEFAULT_GROUP);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
index 60887e4..d3577f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.rsgroup;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -113,7 +112,7 @@ public class TestRSGroupsOfflineMode {
     final HRegionServer groupRS = ((MiniHBaseCluster) cluster).getRegionServer(1);
     final HRegionServer failoverRS = ((MiniHBaseCluster) cluster).getRegionServer(2);
     String newGroup = "my_group";
-    RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection());
+    RSGroupAdminClient groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection());
     groupAdmin.addRSGroup(newGroup);
     if (master.getAssignmentManager().getRegionStates().getRegionAssignments()
       .containsValue(failoverRS.getServerName())) {
@@ -168,9 +167,6 @@ public class TestRSGroupsOfflineMode {
       .getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class).getGroupInfoManager();
     // Make sure balancer is in offline mode, since this is what we're testing.
     assertFalse(groupMgr.isOnline());
-    // Verify the group affiliation that's loaded from ZK instead of tables.
-    assertEquals(newGroup, groupMgr.getRSGroupOfTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME));
-    assertEquals(RSGroupInfo.DEFAULT_GROUP, groupMgr.getRSGroupOfTable(failoverTable));
     // Kill the final regionserver to check that failover happens for all tables except the GROUP
     // table, since its group does not have any online RS.
     killRS.stop("die");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index fcaf1a7..a8cd277 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -17,17 +17,26 @@
  */
 package org.apache.hadoop.hbase.rsgroup;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -37,22 +46,20 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
-import org.junit.Assert;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 
 @InterfaceAudience.Private
-public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
-  private Table table;
+public class VerifyingRSGroupAdminClient extends RSGroupAdminClient {
+  private Connection conn;
   private ZKWatcher zkw;
-  private RSGroupAdmin wrapped;
+  private RSGroupAdminClient wrapped;
 
-  public VerifyingRSGroupAdminClient(RSGroupAdmin RSGroupAdmin, Configuration conf)
+  public VerifyingRSGroupAdminClient(RSGroupAdminClient RSGroupAdmin, Configuration conf)
       throws IOException {
     wrapped = RSGroupAdmin;
-    table = ConnectionFactory.createConnection(conf)
-        .getTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME);
+    conn = ConnectionFactory.createConnection(conf);
     zkw = new ZKWatcher(conf, this.getClass().getSimpleName(), null);
   }
 
@@ -121,31 +128,41 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
   public void verify() throws IOException {
     Map<String, RSGroupInfo> groupMap = Maps.newHashMap();
     Set<RSGroupInfo> zList = Sets.newHashSet();
-
-    for (Result result : table.getScanner(new Scan())) {
-      RSGroupProtos.RSGroupInfo proto =
-          RSGroupProtos.RSGroupInfo.parseFrom(
-              result.getValue(
-                  RSGroupInfoManagerImpl.META_FAMILY_BYTES,
-                  RSGroupInfoManagerImpl.META_QUALIFIER_BYTES));
-      groupMap.put(proto.getName(), ProtobufUtil.toGroupInfo(proto));
+    List<TableDescriptor> tds = new ArrayList<>();
+    try (Admin admin = conn.getAdmin()) {
+      tds.addAll(admin.listTableDescriptors());
+      tds.addAll(admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME));
+    }
+    try (Table table = conn.getTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME);
+        ResultScanner scanner = table.getScanner(new Scan())) {
+      for (;;) {
+        Result result = scanner.next();
+        if (result == null) {
+          break;
+        }
+        RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(result.getValue(
+          RSGroupInfoManagerImpl.META_FAMILY_BYTES, RSGroupInfoManagerImpl.META_QUALIFIER_BYTES));
+        RSGroupInfo rsGroupInfo = ProtobufUtil.toGroupInfo(proto);
+        groupMap.put(proto.getName(), RSGroupUtil.fillTables(rsGroupInfo, tds));
+      }
     }
-    Assert.assertEquals(Sets.newHashSet(groupMap.values()),
-        Sets.newHashSet(wrapped.listRSGroups()));
+    assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(wrapped.listRSGroups()));
     try {
       String groupBasePath = ZNodePaths.joinZNode(zkw.getZNodePaths().baseZNode, "rsgroup");
-      for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
+      for (String znode : ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
         byte[] data = ZKUtil.getData(zkw, ZNodePaths.joinZNode(groupBasePath, znode));
-        if(data.length > 0) {
+        if (data.length > 0) {
           ProtobufUtil.expectPBMagicPrefix(data);
-          ByteArrayInputStream bis = new ByteArrayInputStream(
-              data, ProtobufUtil.lengthOfPBMagic(), data.length);
-          zList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
+          ByteArrayInputStream bis =
+              new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
+          RSGroupInfo rsGroupInfo =
+              ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis));
+          zList.add(RSGroupUtil.fillTables(rsGroupInfo, tds));
         }
       }
-      Assert.assertEquals(zList.size(), groupMap.size());
-      for(RSGroupInfo RSGroupInfo : zList) {
-        Assert.assertTrue(groupMap.get(RSGroupInfo.getName()).equals(RSGroupInfo));
+      assertEquals(zList.size(), groupMap.size());
+      for (RSGroupInfo rsGroupInfo : zList) {
+        assertTrue(groupMap.get(rsGroupInfo.getName()).equals(rsGroupInfo));
       }
     } catch (KeeperException e) {
       throw new IOException("ZK verification failed", e);


[hbase] 08/11: HBASE-22729 Start RSGroupInfoManager as default (#555)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 6bb0e70134cedb4e22dc7d0817e03899a7fc0775
Author: Guanghao Zhang <zg...@apache.org>
AuthorDate: Tue Sep 3 11:39:19 2019 +0800

    HBASE-22729 Start RSGroupInfoManager as default (#555)
    
    Amending-Author: Duo Zhang <zh...@apache.org>
    Signed-off-by: stack <st...@apache.org>
---
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java  | 10 ++++++++++
 .../java/org/apache/hadoop/hbase/master/MasterServices.java    |  7 +++++--
 .../org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java  |  2 +-
 .../org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java    |  5 +++++
 .../org/apache/hadoop/hbase/master/MockNoopMasterServices.java |  6 ++++++
 5 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 1562d92..8282f12 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -188,6 +188,7 @@ import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.SecurityConstants;
 import org.apache.hadoop.hbase.security.UserProvider;
@@ -355,6 +356,8 @@ public class HMaster extends HRegionServer implements MasterServices {
   // manager of assignment nodes in zookeeper
   private AssignmentManager assignmentManager;
 
+  private RSGroupInfoManager rsGroupInfoManager;
+
   // manager of replication
   private ReplicationPeerManager replicationPeerManager;
 
@@ -778,6 +781,8 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);
     this.splitOrMergeTracker.start();
 
+    this.rsGroupInfoManager = RSGroupInfoManager.create(this);
+
     this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);
 
     this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
@@ -3851,4 +3856,9 @@ public class HMaster extends HRegionServer implements MasterServices {
   public HbckChore getHbckChore() {
     return this.hbckChore;
   }
+
+  @Override
+  public RSGroupInfoManager getRSRSGroupInfoManager() {
+    return rsGroupInfoManager;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 41cec5c..ead40b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -18,10 +18,8 @@
 package org.apache.hadoop.hbase.master;
 
 import com.google.protobuf.Service;
-
 import java.io.IOException;
 import java.util.List;
-
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -51,6 +49,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
 import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -537,4 +536,8 @@ public interface MasterServices extends Server {
    */
   List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans);
 
+  /**
+   * @return the {@link RSGroupInfoManager}
+   */
+  RSGroupInfoManager getRSRSGroupInfoManager();
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index a2a5623..0bde67b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -66,7 +66,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
     }
 
     master = ((HasMasterServices) env).getMasterServices();
-    groupInfoManager = RSGroupInfoManagerImpl.getInstance(master);
+    groupInfoManager = master.getRSRSGroupInfoManager();
     groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
     Class<?> clazz =
       master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 1b9f3ef..a46fa4b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Set;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -87,4 +88,8 @@ public interface RSGroupInfoManager {
    */
   @Deprecated
   RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException;
+
+  static RSGroupInfoManager create(MasterServices master) throws IOException {
+    return RSGroupInfoManagerImpl.getInstance(master);
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index cbfdd3f..26772ae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
 import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -490,4 +491,9 @@ public class MockNoopMasterServices implements MasterServices {
   public AsyncClusterConnection getAsyncClusterConnection() {
     return null;
   }
+
+  @Override
+  public RSGroupInfoManager getRSRSGroupInfoManager() {
+    return null;
+  }
 }
\ No newline at end of file


[hbase] 03/11: HBASE-22676 Move all the code in hbase-rsgroup to hbase-server and remove hbase-rsgroup module (#399)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-22514
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit ea154c0ad6a7e135ca425e549a0278100f77ca77
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Tue Jul 23 09:40:02 2019 +0800

    HBASE-22676 Move all the code in hbase-rsgroup to hbase-server and remove hbase-rsgroup module (#399)
    
    Signed-off-by: Zheng Hu <op...@gmail.com>
---
 hbase-assembly/src/main/assembly/components.xml    |   8 -
 .../src/main/assembly/hadoop-two-compat.xml        |   1 -
 hbase-it/pom.xml                                   |  10 -
 hbase-rsgroup/README.txt                           |  13 -
 hbase-rsgroup/pom.xml                              | 262 ----------
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 559 ---------------------
 hbase-rsgroup/src/test/resources/hbase-site.xml    |  32 --
 hbase-rsgroup/src/test/resources/log4j.properties  |  68 ---
 .../apache/hadoop/hbase/rsgroup/RSGroupAdmin.java  |   0
 .../hadoop/hbase/rsgroup/RSGroupAdminClient.java   |   0
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 226 +++++++++
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java   |   0
 .../hbase/rsgroup/RSGroupAdminServiceImpl.java     | 378 ++++++++++++++
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java    |  24 +-
 .../hbase/rsgroup/RSGroupMajorCompactionTTL.java   |   0
 .../hadoop/hbase/rsgroup/RSGroupableBalancer.java  |   0
 .../balancer/RSGroupableBalancerTestBase.java      |   0
 .../balancer/TestRSGroupBasedLoadBalancer.java     |   0
 ...lancerWithStochasticLoadBalancerAsInternal.java |   0
 .../procedure/TestSCPWithReplicasWithRSGroup.java  |   0
 .../hadoop/hbase/rsgroup/TestEnableRSGroups.java   |   0
 .../rsgroup/TestRSGroupMajorCompactionTTL.java     |   0
 .../hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java   |   0
 .../hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java   |   0
 .../hadoop/hbase/rsgroup/TestRSGroupsBalance.java  |   0
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java     |   0
 .../hadoop/hbase/rsgroup/TestRSGroupsBasics.java   |   0
 .../hadoop/hbase/rsgroup/TestRSGroupsKillRS.java   |   0
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java     |   0
 .../hadoop/hbase/rsgroup/TestRSGroupsWithACL.java  |  77 ++-
 .../hbase/rsgroup/VerifyingRSGroupAdminClient.java |   0
 hbase-shell/pom.xml                                |  35 --
 pom.xml                                            |  24 -
 33 files changed, 647 insertions(+), 1070 deletions(-)

diff --git a/hbase-assembly/src/main/assembly/components.xml b/hbase-assembly/src/main/assembly/components.xml
index 2eb16e7..4dd85ef 100644
--- a/hbase-assembly/src/main/assembly/components.xml
+++ b/hbase-assembly/src/main/assembly/components.xml
@@ -145,14 +145,6 @@
       <fileMode>0644</fileMode>
     </fileSet>
     <fileSet>
-      <directory>${project.basedir}/../hbase-rsgroup/target/</directory>
-      <outputDirectory>lib</outputDirectory>
-      <includes>
-        <include>${rsgroup.test.jar}</include>
-      </includes>
-      <fileMode>0644</fileMode>
-    </fileSet>
-    <fileSet>
       <directory>${project.basedir}/../hbase-mapreduce/target/</directory>
       <outputDirectory>lib</outputDirectory>
       <includes>
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index 91d3749..6a21b92 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -52,7 +52,6 @@
         <include>org.apache.hbase:hbase-protocol-shaded</include>
         <include>org.apache.hbase:hbase-replication</include>
         <include>org.apache.hbase:hbase-rest</include>
-        <include>org.apache.hbase:hbase-rsgroup</include>
         <include>org.apache.hbase:hbase-server</include>
         <include>org.apache.hbase:hbase-shell</include>
         <include>org.apache.hbase:hbase-testing-util</include>
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 0fe578a..6b173f0 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -183,16 +183,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-rsgroup</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-rsgroup</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
     </dependency>
     <dependency>
diff --git a/hbase-rsgroup/README.txt b/hbase-rsgroup/README.txt
deleted file mode 100644
index b24aee6..0000000
--- a/hbase-rsgroup/README.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-ON PROTOBUFS
-This maven module has protobuf definition files ('.protos') used by hbase
-Coprocessor Endpoints that ship with hbase core including tests. Coprocessor
-Endpoints are meant to be standalone, independent code not reliant on hbase
-internals. They define their Service using protobuf. The protobuf version
-they use can be distinct from that used by HBase internally since HBase started
-shading its protobuf references. Endpoints have no access to the shaded protobuf
-hbase uses. They do have access to the content of hbase-protocol -- the
-.protos found in here -- but avoid using as much of this as you can as it is
-liable to change.
-
-Generation of java files from protobuf .proto files included here is done as
-part of the build.
diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml
deleted file mode 100644
index 1135939..0000000
--- a/hbase-rsgroup/pom.xml
+++ /dev/null
@@ -1,262 +0,0 @@
-<?xml version="1.0"?>
-<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <!--
-  /**
-   * Licensed to the Apache Software Foundation (ASF) under one
-   * or more contributor license agreements.  See the NOTICE file
-   * distributed with this work for additional information
-   * regarding copyright ownership.  The ASF licenses this file
-   * to you under the Apache License, Version 2.0 (the
-   * "License"); you may not use this file except in compliance
-   * with the License.  You may obtain a copy of the License at
-   *
-   *     http://www.apache.org/licenses/LICENSE-2.0
-   *
-   * Unless required by applicable law or agreed to in writing, software
-   * distributed under the License is distributed on an "AS IS" BASIS,
-   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   * See the License for the specific language governing permissions and
-   * limitations under the License.
-   */
-  -->
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <artifactId>hbase-build-configuration</artifactId>
-    <groupId>org.apache.hbase</groupId>
-    <version>3.0.0-SNAPSHOT</version>
-    <relativePath>../hbase-build-configuration</relativePath>
-  </parent>
-  <artifactId>hbase-rsgroup</artifactId>
-  <name>Apache HBase - RSGroup</name>
-  <description>Regionserver Groups for HBase</description>
-  <build>
-    <plugins>
-      <plugin>
-        <!--Make it so assembly:single does nothing in here-->
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <skipAssembly>true</skipAssembly>
-        </configuration>
-      </plugin>
-      <!-- Make a jar and put the sources in the jar -->
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-source-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <failOnViolation>true</failOnViolation>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>net.revelc.code</groupId>
-        <artifactId>warbucks-maven-plugin</artifactId>
-      </plugin>
-    </plugins>
-  </build>
-  <dependencies>
-    <!-- Intra-project dependencies -->
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-annotations</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-server</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-procedure</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-procedure</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-protocol</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-protocol-shaded</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-testing-util</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <!-- General dependencies -->
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-lang3</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase.thirdparty</groupId>
-      <artifactId>hbase-shaded-miscellaneous</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.zookeeper</groupId>
-      <artifactId>zookeeper</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <profiles>
-    <!-- Skip the tests in this module -->
-    <profile>
-      <id>skipRSGroupTests</id>
-      <activation>
-        <property>
-          <name>skipRSGroupTests</name>
-        </property>
-      </activation>
-      <properties>
-        <surefire.skipFirstPart>true</surefire.skipFirstPart>
-        <surefire.skipSecondPart>true</surefire.skipSecondPart>
-      </properties>
-    </profile>
-    <!-- profile against Hadoop 2.x: This is the default. -->
-    <profile>
-      <id>hadoop-2.0</id>
-      <activation>
-        <property>
-          <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
-          <!--h2-->
-          <name>!hadoop.profile</name>
-        </property>
-      </activation>
-      <dependencies>
-        <dependency>
-          <groupId>com.github.stephenc.findbugs</groupId>
-          <artifactId>findbugs-annotations</artifactId>
-          <optional>true</optional>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-common</artifactId>
-          <exclusions>
-            <exclusion>
-              <groupId>net.java.dev.jets3t</groupId>
-              <artifactId>jets3t</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>javax.servlet.jsp</groupId>
-              <artifactId>jsp-api</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>org.mortbay.jetty</groupId>
-              <artifactId>jetty</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>com.sun.jersey</groupId>
-              <artifactId>jersey-server</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>com.sun.jersey</groupId>
-              <artifactId>jersey-core</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>com.sun.jersey</groupId>
-              <artifactId>jersey-json</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>javax.servlet</groupId>
-              <artifactId>servlet-api</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>tomcat</groupId>
-              <artifactId>jasper-compiler</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>tomcat</groupId>
-              <artifactId>jasper-runtime</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>com.google.code.findbugs</groupId>
-              <artifactId>jsr305</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-      </dependencies>
-    </profile>
-    <!--
-      profile for building against Hadoop 3.0.x. Activate using:
-       mvn -Dhadoop.profile=3.0
-    -->
-    <profile>
-      <id>hadoop-3.0</id>
-      <activation>
-        <property>
-          <name>hadoop.profile</name>
-          <value>3.0</value>
-        </property>
-      </activation>
-      <properties>
-        <hadoop.version>3.0-SNAPSHOT</hadoop.version>
-      </properties>
-      <dependencies>
-        <dependency>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-common</artifactId>
-        </dependency>
-      </dependencies>
-    </profile>
-    <profile>
-      <id>eclipse-specific</id>
-      <activation>
-        <property>
-          <name>m2e.version</name>
-        </property>
-      </activation>
-      <build>
-        <pluginManagement>
-          <plugins>
-            <!--This plugin's configuration is used to store Eclipse m2e settings
-                 only. It has no influence on the Maven build itself.-->
-            <plugin>
-              <groupId>org.eclipse.m2e</groupId>
-              <artifactId>lifecycle-mapping</artifactId>
-              <configuration>
-                <lifecycleMappingMetadata>
-                  <pluginExecutions>
-                  </pluginExecutions>
-                </lifecycleMappingMetadata>
-              </configuration>
-            </plugin>
-          </plugins>
-        </pluginManagement>
-      </build>
-    </profile>
-  </profiles>
-</project>
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
deleted file mode 100644
index 8c6e010..0000000
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rsgroup;
-
-import com.google.protobuf.RpcCallback;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.Service;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.PleaseHoldException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.SnapshotDescription;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
-import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.MasterObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-
-// TODO: Encapsulate MasterObserver functions into separate subclass.
-@CoreCoprocessor
-@InterfaceAudience.Private
-public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
-  private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminEndpoint.class);
-
-  private MasterServices master = null;
-  // Only instance of RSGroupInfoManager. RSGroup aware load balancers ask for this instance on
-  // their setup.
-  private RSGroupInfoManager groupInfoManager;
-  private RSGroupAdminServer groupAdminServer;
-  private final RSGroupAdminService groupAdminService = new RSGroupAdminServiceImpl();
-  private AccessChecker accessChecker;
-
-  /** Provider for mapping principal names to Users */
-  private UserProvider userProvider;
-
-  @Override
-  public void start(CoprocessorEnvironment env) throws IOException {
-    if (!(env instanceof HasMasterServices)) {
-      throw new IOException("Does not implement HMasterServices");
-    }
-
-    master = ((HasMasterServices)env).getMasterServices();
-    groupInfoManager = RSGroupInfoManagerImpl.getInstance(master);
-    groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
-    Class<?> clazz =
-        master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
-    if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
-      throw new IOException("Configured balancer does not support RegionServer groups.");
-    }
-    accessChecker = ((HasMasterServices) env).getMasterServices().getAccessChecker();
-
-    // set the user-provider.
-    this.userProvider = UserProvider.instantiate(env.getConfiguration());
-  }
-
-  @Override
-  public void stop(CoprocessorEnvironment env) {
-  }
-
-  @Override
-  public Iterable<Service> getServices() {
-    return Collections.singleton(groupAdminService);
-  }
-
-  @Override
-  public Optional<MasterObserver> getMasterObserver() {
-    return Optional.of(this);
-  }
-
-  RSGroupInfoManager getGroupInfoManager() {
-    return groupInfoManager;
-  }
-
-  /**
-   * Implementation of RSGroupAdminService defined in RSGroupAdmin.proto.
-   * This class calls {@link RSGroupAdminServer} for actual work, converts result to protocol
-   * buffer response, handles exceptions if any occurred and then calls the {@code RpcCallback} with
-   * the response.
-   */
-  private class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
-    @Override
-    public void getRSGroupInfo(RpcController controller,
-        GetRSGroupInfoRequest request, RpcCallback<GetRSGroupInfoResponse> done) {
-      GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder();
-      String groupName = request.getRSGroupName();
-      LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group="
-              + groupName);
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
-        }
-        checkPermission("getRSGroupInfo");
-        RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
-        if (rsGroupInfo != null) {
-          builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
-        }
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void getRSGroupInfoOfTable(RpcController controller,
-        GetRSGroupInfoOfTableRequest request, RpcCallback<GetRSGroupInfoOfTableResponse> done) {
-      GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder();
-      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
-      LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table="
-          + tableName);
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
-        }
-        checkPermission("getRSGroupInfoOfTable");
-        RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName);
-        if (RSGroupInfo != null) {
-          builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
-        }
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void moveServers(RpcController controller, MoveServersRequest request,
-        RpcCallback<MoveServersResponse> done) {
-      MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
-      Set<Address> hostPorts = Sets.newHashSet();
-      for (HBaseProtos.ServerName el : request.getServersList()) {
-        hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
-      }
-      LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +" to rsgroup "
-          + request.getTargetGroup());
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
-        }
-        checkPermission("moveServers");
-        groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void moveTables(RpcController controller, MoveTablesRequest request,
-        RpcCallback<MoveTablesResponse> done) {
-      MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder();
-      Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
-      for (HBaseProtos.TableName tableName : request.getTableNameList()) {
-        tables.add(ProtobufUtil.toTableName(tableName));
-      }
-      LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +" to rsgroup "
-          + request.getTargetGroup());
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup());
-        }
-        checkPermission("moveTables");
-        groupAdminServer.moveTables(tables, request.getTargetGroup());
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postMoveTables(tables, request.getTargetGroup());
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void addRSGroup(RpcController controller, AddRSGroupRequest request,
-        RpcCallback<AddRSGroupResponse> done) {
-      AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
-      LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
-        }
-        checkPermission("addRSGroup");
-        groupAdminServer.addRSGroup(request.getRSGroupName());
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void removeRSGroup(RpcController controller,
-        RemoveRSGroupRequest request, RpcCallback<RemoveRSGroupResponse> done) {
-      RemoveRSGroupResponse.Builder builder =
-          RemoveRSGroupResponse.newBuilder();
-      LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
-        }
-        checkPermission("removeRSGroup");
-        groupAdminServer.removeRSGroup(request.getRSGroupName());
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void balanceRSGroup(RpcController controller,
-        BalanceRSGroupRequest request, RpcCallback<BalanceRSGroupResponse> done) {
-      BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder();
-      LOG.info(master.getClientIdAuditPrefix() + " balance rsgroup, group=" +
-              request.getRSGroupName());
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName());
-        }
-        checkPermission("balanceRSGroup");
-        boolean balancerRan = groupAdminServer.balanceRSGroup(request.getRSGroupName());
-        builder.setBalanceRan(balancerRan);
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(),
-              balancerRan);
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-        builder.setBalanceRan(false);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void listRSGroupInfos(RpcController controller,
-        ListRSGroupInfosRequest request, RpcCallback<ListRSGroupInfosResponse> done) {
-      ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
-      LOG.info(master.getClientIdAuditPrefix() + " list rsgroup");
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preListRSGroups();
-        }
-        checkPermission("listRSGroup");
-        for (RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) {
-          builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
-        }
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postListRSGroups();
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void getRSGroupInfoOfServer(RpcController controller,
-        GetRSGroupInfoOfServerRequest request, RpcCallback<GetRSGroupInfoOfServerResponse> done) {
-      GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder();
-      Address hp = Address.fromParts(request.getServer().getHostName(),
-          request.getServer().getPort());
-      LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server="
-          + hp);
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
-        }
-        checkPermission("getRSGroupInfoOfServer");
-        RSGroupInfo info = groupAdminServer.getRSGroupOfServer(hp);
-        if (info != null) {
-          builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(info));
-        }
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void moveServersAndTables(RpcController controller,
-        MoveServersAndTablesRequest request, RpcCallback<MoveServersAndTablesResponse> done) {
-      MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder();
-      Set<Address> hostPorts = Sets.newHashSet();
-      for (HBaseProtos.ServerName el : request.getServersList()) {
-        hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
-      }
-      Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
-      for (HBaseProtos.TableName tableName : request.getTableNameList()) {
-        tables.add(ProtobufUtil.toTableName(tableName));
-      }
-      LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts
-          + " and tables " + tables + " to rsgroup" + request.getTargetGroup());
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables,
-              request.getTargetGroup());
-        }
-        checkPermission("moveServersAndTables");
-        groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup());
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postMoveServersAndTables(hostPorts, tables,
-              request.getTargetGroup());
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-
-    @Override
-    public void removeServers(RpcController controller,
-        RemoveServersRequest request,
-        RpcCallback<RemoveServersResponse> done) {
-      RemoveServersResponse.Builder builder =
-          RemoveServersResponse.newBuilder();
-      Set<Address> servers = Sets.newHashSet();
-      for (HBaseProtos.ServerName el : request.getServersList()) {
-        servers.add(Address.fromParts(el.getHostName(), el.getPort()));
-      }
-      LOG.info(master.getClientIdAuditPrefix()
-          + " remove decommissioned servers from rsgroup: " + servers);
-      try {
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().preRemoveServers(servers);
-        }
-        checkPermission("removeServers");
-        groupAdminServer.removeServers(servers);
-        if (master.getMasterCoprocessorHost() != null) {
-          master.getMasterCoprocessorHost().postRemoveServers(servers);
-        }
-      } catch (IOException e) {
-        CoprocessorRpcUtils.setControllerException(controller, e);
-      }
-      done.run(builder.build());
-    }
-  }
-
-  boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException {
-    String groupName;
-    try {
-      groupName =
-        master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
-        .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
-      if (groupName == null) {
-        groupName = RSGroupInfo.DEFAULT_GROUP;
-      }
-    } catch (MasterNotRunningException | PleaseHoldException e) {
-      LOG.info("Master has not initialized yet; temporarily using default RSGroup '" +
-          RSGroupInfo.DEFAULT_GROUP + "' for deploy of system table");
-      groupName = RSGroupInfo.DEFAULT_GROUP;
-    }
-
-    RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
-    if (rsGroupInfo == null) {
-      throw new ConstraintException(
-          "Default RSGroup (" + groupName + ") for this table's " + "namespace does not exist.");
-    }
-
-    for (ServerName onlineServer : master.getServerManager().createDestinationServersList()) {
-      if (rsGroupInfo.getServers().contains(onlineServer.getAddress())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  void assignTableToGroup(TableDescriptor desc) throws IOException {
-    String groupName =
-        master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
-                .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
-    if (groupName == null) {
-      groupName = RSGroupInfo.DEFAULT_GROUP;
-    }
-    RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
-    if (rsGroupInfo == null) {
-      throw new ConstraintException("Default RSGroup (" + groupName + ") for this table's "
-          + "namespace does not exist.");
-    }
-    if (!rsGroupInfo.containsTable(desc.getTableName())) {
-      LOG.debug("Pre-moving table " + desc.getTableName() + " to RSGroup " + groupName);
-      groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName);
-    }
-  }
-
-  /////////////////////////////////////////////////////////////////////////////
-  // MasterObserver overrides
-  /////////////////////////////////////////////////////////////////////////////
-
-  @Override
-  public void preCreateTableAction(
-      final ObserverContext<MasterCoprocessorEnvironment> ctx,
-      final TableDescriptor desc,
-      final RegionInfo[] regions) throws IOException {
-    if (!desc.getTableName().isSystemTable() && !rsgroupHasServersOnline(desc)) {
-      throw new HBaseIOException("No online servers in the rsgroup, which table " +
-          desc.getTableName().getNameAsString() + " belongs to");
-    }
-  }
-
-  // Assign table to default RSGroup.
-  @Override
-  public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      TableDescriptor desc, RegionInfo[] regions) throws IOException {
-    assignTableToGroup(desc);
-  }
-
-  // Remove table from its RSGroup.
-  @Override
-  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                              TableName tableName) throws IOException {
-    try {
-      RSGroupInfo group = groupAdminServer.getRSGroupInfoOfTable(tableName);
-      if (group != null) {
-        LOG.debug(String.format("Removing deleted table '%s' from rsgroup '%s'", tableName,
-            group.getName()));
-        groupAdminServer.moveTables(Sets.newHashSet(tableName), null);
-      }
-    } catch (IOException ex) {
-      LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex);
-    }
-  }
-
-  @Override
-  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                                 NamespaceDescriptor ns) throws IOException {
-    String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
-    if(group != null && groupAdminServer.getRSGroupInfo(group) == null) {
-      throw new ConstraintException("Region server group "+group+" does not exit");
-    }
-  }
-
-  @Override
-  public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      NamespaceDescriptor currentNsDesc, NamespaceDescriptor newNsDesc) throws IOException {
-    preCreateNamespace(ctx, newNsDesc);
-  }
-
-  @Override
-  public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      SnapshotDescription snapshot, TableDescriptor desc) throws IOException {
-    assignTableToGroup(desc);
-  }
-
-  @Override
-  public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      List<ServerName> servers, List<ServerName> notClearedServers)
-      throws IOException {
-    Set<Address> clearedServer = servers.stream().
-        filter(server -> !notClearedServers.contains(server)).
-        map(ServerName::getAddress).
-        collect(Collectors.toSet());
-    if(!clearedServer.isEmpty()) {
-      groupAdminServer.removeServers(clearedServer);
-    }
-  }
-
-  public void checkPermission(String request) throws IOException {
-    accessChecker.requirePermission(getActiveUser(), request, null, Action.ADMIN);
-  }
-
-  /**
-   * Returns the active user to which authorization checks should be applied.
-   * If we are in the context of an RPC call, the remote user is used,
-   * otherwise the currently logged in user is used.
-   */
-  private User getActiveUser() throws IOException {
-    // for non-rpc handling, fallback to system user
-    Optional<User> optionalUser = RpcServer.getRequestUser();
-    if (optionalUser.isPresent()) {
-      return optionalUser.get();
-    }
-    return userProvider.getCurrent();
-  }
-}
diff --git a/hbase-rsgroup/src/test/resources/hbase-site.xml b/hbase-rsgroup/src/test/resources/hbase-site.xml
deleted file mode 100644
index 99d2ab8..0000000
--- a/hbase-rsgroup/src/test/resources/hbase-site.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hbase.hconnection.threads.keepalivetime</name>
-    <value>3</value>
-  </property>
-</configuration>
diff --git a/hbase-rsgroup/src/test/resources/log4j.properties b/hbase-rsgroup/src/test/resources/log4j.properties
deleted file mode 100644
index c322699..0000000
--- a/hbase-rsgroup/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hbase.root.logger=INFO,console
-hbase.log.dir=.
-hbase.log.file=hbase.log
-
-# Define the root logger to the system property "hbase.root.logger".
-log4j.rootLogger=${hbase.root.logger}
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
-
-# Custom Logging levels
-
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-
-log4j.logger.org.apache.hadoop=WARN
-log4j.logger.org.apache.zookeeper=ERROR
-log4j.logger.org.apache.hadoop.hbase=DEBUG
-
-#These settings are workarounds against spurious logs from the minicluster.
-#See HBASE-4709
-log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=WARN
-log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSinkAdapter=WARN
-log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=WARN
-log4j.logger.org.apache.hadoop.metrics2.util.MBeans=WARN
-# Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
similarity index 100%
rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
similarity index 100%
rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
new file mode 100644
index 0000000..2d5af04
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.protobuf.Service;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.access.AccessChecker;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+
+// TODO: Encapsulate MasterObserver functions into separate subclass.
+@CoreCoprocessor
+@InterfaceAudience.Private
+public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
+  static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminEndpoint.class);
+
+  private MasterServices master;
+  // Only instance of RSGroupInfoManager. RSGroup aware load balancers ask for this instance on
+  // their setup.
+  private RSGroupInfoManager groupInfoManager;
+  private RSGroupAdminServer groupAdminServer;
+  private RSGroupAdminServiceImpl groupAdminService = new RSGroupAdminServiceImpl();
+
+  @Override
+  public void start(CoprocessorEnvironment env) throws IOException {
+    if (!(env instanceof HasMasterServices)) {
+      throw new IOException("Does not implement HMasterServices");
+    }
+
+    master = ((HasMasterServices) env).getMasterServices();
+    groupInfoManager = RSGroupInfoManagerImpl.getInstance(master);
+    groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
+    Class<?> clazz =
+        master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
+    if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
+      throw new IOException("Configured balancer does not support RegionServer groups.");
+    }
+    AccessChecker accessChecker = ((HasMasterServices) env).getMasterServices().getAccessChecker();
+
+    // set the user-provider.
+    UserProvider userProvider = UserProvider.instantiate(env.getConfiguration());
+    groupAdminService.initialize(master, groupAdminServer, accessChecker, userProvider);
+  }
+
+  @Override
+  public void stop(CoprocessorEnvironment env) {
+  }
+
+  @Override
+  public Iterable<Service> getServices() {
+    return Collections.singleton(groupAdminService);
+  }
+
+  @Override
+  public Optional<MasterObserver> getMasterObserver() {
+    return Optional.of(this);
+  }
+
+  RSGroupInfoManager getGroupInfoManager() {
+    return groupInfoManager;
+  }
+
+  @VisibleForTesting
+  RSGroupAdminServiceImpl getGroupAdminService() {
+    return groupAdminService;
+  }
+
+  private void assignTableToGroup(TableDescriptor desc) throws IOException {
+    String groupName =
+        master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
+            .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
+    if (groupName == null) {
+      groupName = RSGroupInfo.DEFAULT_GROUP;
+    }
+    RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
+    if (rsGroupInfo == null) {
+      throw new ConstraintException(
+          "Default RSGroup (" + groupName + ") for this table's namespace does not exist.");
+    }
+    if (!rsGroupInfo.containsTable(desc.getTableName())) {
+      LOG.debug("Pre-moving table " + desc.getTableName() + " to RSGroup " + groupName);
+      groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName);
+    }
+  }
+
+  /////////////////////////////////////////////////////////////////////////////
+  // MasterObserver overrides
+  /////////////////////////////////////////////////////////////////////////////
+
+  private boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException {
+    String groupName;
+    try {
+      groupName = master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
+          .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
+      if (groupName == null) {
+        groupName = RSGroupInfo.DEFAULT_GROUP;
+      }
+    } catch (MasterNotRunningException | PleaseHoldException e) {
+      LOG.info("Master has not initialized yet; temporarily using default RSGroup '" +
+          RSGroupInfo.DEFAULT_GROUP + "' for deploy of system table");
+      groupName = RSGroupInfo.DEFAULT_GROUP;
+    }
+
+    RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
+    if (rsGroupInfo == null) {
+      throw new ConstraintException(
+          "Default RSGroup (" + groupName + ") for this table's " + "namespace does not exist.");
+    }
+
+    for (ServerName onlineServer : master.getServerManager().createDestinationServersList()) {
+      if (rsGroupInfo.getServers().contains(onlineServer.getAddress())) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public void preCreateTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      final TableDescriptor desc, final RegionInfo[] regions) throws IOException {
+    if (!desc.getTableName().isSystemTable() && !rsgroupHasServersOnline(desc)) {
+      throw new HBaseIOException("No online servers in the rsgroup to which table " +
+          desc.getTableName().getNameAsString() + " belongs");
+    }
+  }
+
+  // Assign table to default RSGroup.
+  @Override
+  public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      TableDescriptor desc, RegionInfo[] regions) throws IOException {
+    assignTableToGroup(desc);
+  }
+
+  // Remove table from its RSGroup.
+  @Override
+  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      TableName tableName) throws IOException {
+    try {
+      RSGroupInfo group = groupAdminServer.getRSGroupInfoOfTable(tableName);
+      if (group != null) {
+        LOG.debug(String.format("Removing deleted table '%s' from rsgroup '%s'", tableName,
+          group.getName()));
+        groupAdminServer.moveTables(Sets.newHashSet(tableName), null);
+      }
+    } catch (IOException ex) {
+      LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex);
+    }
+  }
+
+  @Override
+  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      NamespaceDescriptor ns) throws IOException {
+    String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
+    if (group != null && groupAdminServer.getRSGroupInfo(group) == null) {
+      throw new ConstraintException("Region server group " + group + " does not exit");
+    }
+  }
+
+  @Override
+  public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      NamespaceDescriptor currentNsDesc, NamespaceDescriptor newNsDesc) throws IOException {
+    preCreateNamespace(ctx, newNsDesc);
+  }
+
+  @Override
+  public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      SnapshotDescription snapshot, TableDescriptor desc) throws IOException {
+    assignTableToGroup(desc);
+  }
+
+  @Override
+  public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      List<ServerName> servers, List<ServerName> notClearedServers) throws IOException {
+    Set<Address> clearedServer =
+        servers.stream().filter(server -> !notClearedServers.contains(server))
+            .map(ServerName::getAddress).collect(Collectors.toSet());
+    if (!clearedServer.isEmpty()) {
+      groupAdminServer.removeServers(clearedServer);
+    }
+  }
+}
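
RSGroupAdminEndpoint is still enabled purely through configuration after the move into
hbase-server. A minimal sketch of that wiring, assuming only constants already referenced in
this patch (compare configureRSGroupAdminEndpoint() in the TestRSGroupsWithACL changes below);
illustrative only, not part of this commit:

    // Sketch: register the endpoint as a master coprocessor and pick an
    // RSGroup-aware balancer, which RSGroupAdminEndpoint.start() requires.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
    import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
    import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;

    public class EnableRSGroupSketch {
      public static Configuration rsGroupConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
            RSGroupAdminEndpoint.class.getName());
        conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
            RSGroupBasedLoadBalancer.class.getName());
        return conf;
      }
    }

start() throws if the configured balancer is not an RSGroupableBalancer, so the two settings
go together.
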
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
similarity index 100%
rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java
new file mode 100644
index 0000000..918a4fe
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java
@@ -0,0 +1,378 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.Set;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.access.AccessChecker;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+
+/**
+ * Implementation of RSGroupAdminService defined in RSGroupAdmin.proto. This class calls
+ * {@link RSGroupAdminServer} for actual work, converts result to protocol buffer response, handles
+ * exceptions if any occurred and then calls the {@code RpcCallback} with the response.
+ */
+class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
+
+  private MasterServices master;
+
+  private RSGroupAdminServer groupAdminServer;
+
+  private AccessChecker accessChecker;
+
+  /** Provider for mapping principal names to Users */
+  private UserProvider userProvider;
+
+  RSGroupAdminServiceImpl() {
+  }
+
+  void initialize(MasterServices master, RSGroupAdminServer groupAdminServer,
+      AccessChecker accessChecker, UserProvider userProvider) {
+    this.master = master;
+    this.groupAdminServer = groupAdminServer;
+    this.accessChecker = accessChecker;
+    this.userProvider = userProvider;
+  }
+
+  @VisibleForTesting
+  void checkPermission(String request) throws IOException {
+    accessChecker.requirePermission(getActiveUser(), request, null, Action.ADMIN);
+  }
+
+  /**
+   * Returns the active user to which authorization checks should be applied. If we are in the
+   * context of an RPC call, the remote user is used, otherwise the currently logged in user is
+   * used.
+   */
+  private User getActiveUser() throws IOException {
+    // for non-rpc handling, fallback to system user
+    Optional<User> optionalUser = RpcServer.getRequestUser();
+    if (optionalUser.isPresent()) {
+      return optionalUser.get();
+    }
+    return userProvider.getCurrent();
+  }
+
+  @Override
+  public void getRSGroupInfo(RpcController controller, GetRSGroupInfoRequest request,
+      RpcCallback<GetRSGroupInfoResponse> done) {
+    GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder();
+    String groupName = request.getRSGroupName();
+    RSGroupAdminEndpoint.LOG.info(
+      master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName);
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
+      }
+      checkPermission("getRSGroupInfo");
+      RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
+      if (rsGroupInfo != null) {
+        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void getRSGroupInfoOfTable(RpcController controller, GetRSGroupInfoOfTableRequest request,
+      RpcCallback<GetRSGroupInfoOfTableResponse> done) {
+    GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder();
+    TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+    RSGroupAdminEndpoint.LOG.info(
+      master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName);
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
+      }
+      checkPermission("getRSGroupInfoOfTable");
+      RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName);
+      if (RSGroupInfo != null) {
+        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void moveServers(RpcController controller, MoveServersRequest request,
+      RpcCallback<MoveServersResponse> done) {
+    MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
+    Set<Address> hostPorts = Sets.newHashSet();
+    for (HBaseProtos.ServerName el : request.getServersList()) {
+      hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
+    }
+    RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +
+        " to rsgroup " + request.getTargetGroup());
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
+      }
+      checkPermission("moveServers");
+      groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void moveTables(RpcController controller, MoveTablesRequest request,
+      RpcCallback<MoveTablesResponse> done) {
+    MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder();
+    Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
+    for (HBaseProtos.TableName tableName : request.getTableNameList()) {
+      tables.add(ProtobufUtil.toTableName(tableName));
+    }
+    RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +
+        " to rsgroup " + request.getTargetGroup());
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup());
+      }
+      checkPermission("moveTables");
+      groupAdminServer.moveTables(tables, request.getTargetGroup());
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postMoveTables(tables, request.getTargetGroup());
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void addRSGroup(RpcController controller, AddRSGroupRequest request,
+      RpcCallback<AddRSGroupResponse> done) {
+    AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
+    RSGroupAdminEndpoint.LOG
+        .info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
+      }
+      checkPermission("addRSGroup");
+      groupAdminServer.addRSGroup(request.getRSGroupName());
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void removeRSGroup(RpcController controller, RemoveRSGroupRequest request,
+      RpcCallback<RemoveRSGroupResponse> done) {
+    RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder();
+    RSGroupAdminEndpoint.LOG
+        .info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
+      }
+      checkPermission("removeRSGroup");
+      groupAdminServer.removeRSGroup(request.getRSGroupName());
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void balanceRSGroup(RpcController controller, BalanceRSGroupRequest request,
+      RpcCallback<BalanceRSGroupResponse> done) {
+    BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder();
+    RSGroupAdminEndpoint.LOG.info(
+      master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName());
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName());
+      }
+      checkPermission("balanceRSGroup");
+      boolean balancerRan = groupAdminServer.balanceRSGroup(request.getRSGroupName());
+      builder.setBalanceRan(balancerRan);
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), balancerRan);
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+      builder.setBalanceRan(false);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest request,
+      RpcCallback<ListRSGroupInfosResponse> done) {
+    ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
+    RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " list rsgroup");
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preListRSGroups();
+      }
+      checkPermission("listRSGroup");
+      for (RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) {
+        builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postListRSGroups();
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void getRSGroupInfoOfServer(RpcController controller,
+      GetRSGroupInfoOfServerRequest request, RpcCallback<GetRSGroupInfoOfServerResponse> done) {
+    GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder();
+    Address hp =
+        Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
+    RSGroupAdminEndpoint.LOG
+        .info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
+      }
+      checkPermission("getRSGroupInfoOfServer");
+      RSGroupInfo info = groupAdminServer.getRSGroupOfServer(hp);
+      if (info != null) {
+        builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(info));
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void moveServersAndTables(RpcController controller, MoveServersAndTablesRequest request,
+      RpcCallback<MoveServersAndTablesResponse> done) {
+    MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder();
+    Set<Address> hostPorts = Sets.newHashSet();
+    for (HBaseProtos.ServerName el : request.getServersList()) {
+      hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
+    }
+    Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
+    for (HBaseProtos.TableName tableName : request.getTableNameList()) {
+      tables.add(ProtobufUtil.toTableName(tableName));
+    }
+    RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +
+        " and tables " + tables + " to rsgroup" + request.getTargetGroup());
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables,
+          request.getTargetGroup());
+      }
+      checkPermission("moveServersAndTables");
+      groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup());
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postMoveServersAndTables(hostPorts, tables,
+          request.getTargetGroup());
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
+  public void removeServers(RpcController controller, RemoveServersRequest request,
+      RpcCallback<RemoveServersResponse> done) {
+    RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder();
+    Set<Address> servers = Sets.newHashSet();
+    for (HBaseProtos.ServerName el : request.getServersList()) {
+      servers.add(Address.fromParts(el.getHostName(), el.getPort()));
+    }
+    RSGroupAdminEndpoint.LOG.info(
+      master.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers);
+    try {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preRemoveServers(servers);
+      }
+      checkPermission("removeServers");
+      groupAdminServer.removeServers(servers);
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postRemoveServers(servers);
+      }
+    } catch (IOException e) {
+      CoprocessorRpcUtils.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+}
\ No newline at end of file
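
Every RPC method in RSGroupAdminServiceImpl follows the same shape: an audit log line, an
optional master coprocessor pre-hook, an ADMIN permission check, delegation to
RSGroupAdminServer, an optional post-hook, and the RpcCallback is always invoked with whatever
response was built; an IOException is attached to the controller instead of being thrown. A
rough sketch of that shape, using a hypothetical helper that is not part of this patch:

    // Hypothetical helper illustrating the try/catch core of each RPC method above;
    // callers then invoke done.run(builder.build()) regardless of success or failure.
    import java.io.IOException;
    import com.google.protobuf.RpcController;
    import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;

    interface AdminAction {
      void run() throws IOException;
    }

    final class RpcPatternSketch {
      static void handle(RpcController controller, AdminAction action) {
        try {
          action.run(); // pre-hook, checkPermission(...), delegate, post-hook
        } catch (IOException e) {
          CoprocessorRpcUtils.setControllerException(controller, e);
        }
      }
    }
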
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
similarity index 97%
rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index 7394f55..be76805 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.rsgroup;
 
 import java.io.IOException;
@@ -56,18 +55,17 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 
 /**
- * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721)
- * It does region balance based on a table's group membership.
- *
- * Most assignment methods contain two exclusive code paths: Online - when the group
- * table is online and Offline - when it is unavailable.
- *
- * During Offline, assignments are assigned based on cached information in zookeeper.
- * If unavailable (ie bootstrap) then regions are assigned randomly.
- *
- * Once the GROUP table has been assigned, the balancer switches to Online and will then
- * start providing appropriate assignments for user tables.
- *
+ * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721). It
+ * balances regions based on a table's group membership.
+ * <p/>
+ * Most assignment methods contain two exclusive code paths: Online - when the group table is online
+ * and Offline - when it is unavailable.
+ * <p/>
+ * During Offline, assignments are made based on cached information in zookeeper. If that is
+ * unavailable (i.e. bootstrap), regions are assigned randomly.
+ * <p/>
+ * Once the GROUP table has been assigned, the balancer switches to Online and will then start
+ * providing appropriate assignments for user tables.
  */
 @InterfaceAudience.Private
 public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java
similarity index 100%
rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java
similarity index 100%
rename from hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
similarity index 81%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
index 0278e3c..c1c157a 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.security.access.SecureTestUtil;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -49,11 +48,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Performs authorization checks for rsgroup operations, according to different
- * levels of authorized users.
+ * Performs authorization checks for rsgroup operations, according to different levels of authorized
+ * users.
  */
-@Category({SecurityTests.class, MediumTests.class})
-public class TestRSGroupsWithACL extends SecureTestUtil{
+@Category({ SecurityTests.class, MediumTests.class })
+public class TestRSGroupsWithACL extends SecureTestUtil {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
@@ -98,8 +97,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   public static void setupBeforeClass() throws Exception {
     // setup configuration
     conf = TEST_UTIL.getConfiguration();
-    conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
-        RSGroupBasedLoadBalancer.class.getName());
+    conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName());
     // Enable security
     enableSecurity(conf);
     // Verify enableSecurity sets up what we require
@@ -108,8 +106,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
     configureRSGroupAdminEndpoint(conf);
 
     TEST_UTIL.startMiniCluster();
-    rsGroupAdminEndpoint = (RSGroupAdminEndpoint) TEST_UTIL.getMiniHBaseCluster().getMaster().
-        getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName());
+    rsGroupAdminEndpoint = (RSGroupAdminEndpoint) TEST_UTIL.getMiniHBaseCluster().getMaster()
+        .getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName());
     // Wait for the ACL table to become available
     TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME);
 
@@ -141,31 +139,21 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
     cfd.setMaxVersions(100);
     tableBuilder.setColumnFamily(cfd.build());
     tableBuilder.setValue(TableDescriptorBuilder.OWNER, USER_OWNER.getShortName());
-    createTable(TEST_UTIL, tableBuilder.build(),
-        new byte[][] { Bytes.toBytes("s") });
+    createTable(TEST_UTIL, tableBuilder.build(), new byte[][] { Bytes.toBytes("s") });
 
     // Set up initial grants
-    grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(),
-        Permission.Action.ADMIN,
-        Permission.Action.CREATE,
-        Permission.Action.READ,
-        Permission.Action.WRITE);
+    grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN,
+      Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE);
 
-    grantOnTable(TEST_UTIL, USER_RW.getShortName(),
-        TEST_TABLE, TEST_FAMILY, null,
-        Permission.Action.READ,
-        Permission.Action.WRITE);
+    grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null,
+      Permission.Action.READ, Permission.Action.WRITE);
 
     // USER_CREATE is USER_RW plus CREATE permissions
-    grantOnTable(TEST_UTIL, USER_CREATE.getShortName(),
-        TEST_TABLE, null, null,
-        Permission.Action.CREATE,
-        Permission.Action.READ,
-        Permission.Action.WRITE);
+    grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), TEST_TABLE, null, null,
+      Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE);
 
-    grantOnTable(TEST_UTIL, USER_RO.getShortName(),
-        TEST_TABLE, TEST_FAMILY, null,
-        Permission.Action.READ);
+    grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
+      Permission.Action.READ);
 
     grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN);
     grantGlobal(TEST_UTIL, toGroupEntry(GROUP_CREATE), Permission.Action.CREATE);
@@ -174,8 +162,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
 
     assertEquals(4, PermissionStorage.getTablePermissions(conf, TEST_TABLE).size());
     try {
-      assertEquals(4, AccessControlClient.getUserPermissions(systemUserConnection,
-          TEST_TABLE.toString()).size());
+      assertEquals(4,
+        AccessControlClient.getUserPermissions(systemUserConnection, TEST_TABLE.toString()).size());
     } catch (AssertionError e) {
       fail(e.getMessage());
     } catch (Throwable e) {
@@ -210,14 +198,13 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
       coprocessors += "," + currentCoprocessors;
     }
     conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, coprocessors);
-    conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
-        RSGroupBasedLoadBalancer.class.getName());
+    conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName());
   }
 
   @Test
   public void testGetRSGroupInfo() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("getRSGroupInfo");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("getRSGroupInfo");
       return null;
     };
 
@@ -227,7 +214,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testGetRSGroupInfoOfTable() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("getRSGroupInfoOfTable");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("getRSGroupInfoOfTable");
       return null;
     };
 
@@ -237,7 +224,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testMoveServers() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("moveServers");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("moveServers");
       return null;
     };
 
@@ -247,7 +234,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testMoveTables() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("moveTables");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("moveTables");
       return null;
     };
 
@@ -257,7 +244,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testAddRSGroup() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("addRSGroup");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("addRSGroup");
       return null;
     };
 
@@ -267,7 +254,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testRemoveRSGroup() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("removeRSGroup");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("removeRSGroup");
       return null;
     };
 
@@ -277,7 +264,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testBalanceRSGroup() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("balanceRSGroup");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("balanceRSGroup");
       return null;
     };
 
@@ -287,7 +274,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testListRSGroup() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("listRSGroup");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("listRSGroup");
       return null;
     };
 
@@ -297,7 +284,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testGetRSGroupInfoOfServer() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("getRSGroupInfoOfServer");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("getRSGroupInfoOfServer");
       return null;
     };
 
@@ -307,7 +294,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testMoveServersAndTables() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("moveServersAndTables");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("moveServersAndTables");
       return null;
     };
 
@@ -317,7 +304,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   @Test
   public void testRemoveServers() throws Exception {
     AccessTestAction action = () -> {
-      rsGroupAdminEndpoint.checkPermission("removeServers");
+      rsGroupAdminEndpoint.getGroupAdminService().checkPermission("removeServers");
       return null;
     };
 
@@ -326,7 +313,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
 
   private void validateAdminPermissions(AccessTestAction action) throws Exception {
     verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
-    verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO,
-        USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE);
+    verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
+      USER_GROUP_WRITE, USER_GROUP_CREATE);
   }
 }
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
similarity index 100%
rename from hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index 7483da5..a6b62ad 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -187,41 +187,6 @@
     </dependency>
   </dependencies>
   <profiles>
-    <profile>
-      <id>rsgroup</id>
-      <activation>
-        <property>
-            <name>!skip-rsgroup</name>
-        </property>
-      </activation>
-      <dependencies>
-        <dependency>
-          <groupId>org.apache.hbase</groupId>
-          <artifactId>hbase-rsgroup</artifactId>
-        </dependency>
-      </dependencies>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>build-helper-maven-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>add-test-source</id>
-                <goals>
-                  <goal>add-test-source</goal>
-                </goals>
-                <configuration>
-                  <sources>
-                    <source>src/test/rsgroup</source>
-                  </sources>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
     <!-- Skip the tests in this module -->
     <profile>
       <id>skipShellTests</id>
diff --git a/pom.xml b/pom.xml
index 1beed46..f8e54a3 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1540,7 +1540,6 @@
     <procedure.test.jar>hbase-procedure-${project.version}-tests.jar</procedure.test.jar>
     <it.test.jar>hbase-it-${project.version}-tests.jar</it.test.jar>
     <annotations.test.jar>hbase-annotations-${project.version}-tests.jar</annotations.test.jar>
-    <rsgroup.test.jar>hbase-rsgroup-${project.version}-tests.jar</rsgroup.test.jar>
     <mapreduce.test.jar>hbase-mapreduce-${project.version}-tests.jar</mapreduce.test.jar>
     <zookeeper.test.jar>hbase-zookeeper-${project.version}-tests.jar</zookeeper.test.jar>
     <shell-executable>bash</shell-executable>
@@ -1671,18 +1670,6 @@
         <scope>test</scope>
       </dependency>
       <dependency>
-        <artifactId>hbase-rsgroup</artifactId>
-        <groupId>org.apache.hbase</groupId>
-        <version>${project.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>hbase-rsgroup</artifactId>
-        <groupId>org.apache.hbase</groupId>
-        <version>${project.version}</version>
-        <type>test-jar</type>
-        <scope>test</scope>
-      </dependency>
-      <dependency>
         <artifactId>hbase-replication</artifactId>
         <groupId>org.apache.hbase</groupId>
         <version>${project.version}</version>
@@ -2321,17 +2308,6 @@
   -->
   <profiles>
     <profile>
-      <id>rsgroup</id>
-      <activation>
-        <property>
-            <name>!skip-rsgroup</name>
-        </property>
-      </activation>
-      <modules>
-        <module>hbase-rsgroup</module>
-      </modules>
-    </profile>
-    <profile>
       <id>build-with-jdk8</id>
       <activation>
         <jdk>[1.8,)</jdk>