Posted to commits@hbase.apache.org by st...@apache.org on 2016/11/17 07:07:23 UTC

[1/3] hbase git commit: HBASE-16169: Make RegionSizeCalculator scalable

Repository: hbase
Updated Branches:
  refs/heads/master 48439e572 -> 80acc2dca


http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-protocol-shaded/src/main/protobuf/Admin.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 77cfcff..47d39be 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -25,6 +25,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
+import "ClusterStatus.proto";
 import "HBase.proto";
 import "WAL.proto";
 
@@ -267,6 +268,14 @@ message UpdateConfigurationRequest {
 message UpdateConfigurationResponse {
 }
 
+message GetRegionLoadRequest {
+  optional TableName table_name = 1;
+}
+
+message GetRegionLoadResponse {
+  repeated RegionLoad region_loads = 1;
+}
+
 service AdminService {
   rpc GetRegionInfo(GetRegionInfoRequest)
     returns(GetRegionInfoResponse);
@@ -321,4 +330,7 @@ service AdminService {
 
   rpc UpdateConfiguration(UpdateConfigurationRequest)
     returns(UpdateConfigurationResponse);
+
+  rpc GetRegionLoad(GetRegionLoadRequest)
+    returns(GetRegionLoadResponse);
 }

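Illustration (not from this patch): table_name is declared optional, so a
request without it asks the region server for the load of every online
region, while setting it scopes the reply to one table. A minimal sketch
against the generated classes, assuming a table named "t1":

  // Load of all regions hosted by the server:
  GetRegionLoadRequest all = GetRegionLoadRequest.newBuilder().build();

  // Load of only the regions of table "t1" (toProtoTableName converts the
  // client-side TableName into its protobuf form):
  GetRegionLoadRequest scoped = GetRegionLoadRequest.newBuilder()
      .setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("t1")))
      .build();
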
http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 24d8170..8e78422 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1514,7 +1514,7 @@ public class HRegionServer extends HasThread implements
    *
    * @throws IOException
    */
-  private RegionLoad createRegionLoad(final Region r, RegionLoad.Builder regionLoadBldr,
+  RegionLoad createRegionLoad(final Region r, RegionLoad.Builder regionLoadBldr,
       RegionSpecifier.Builder regionSpecifier) throws IOException {
     byte[] name = r.getRegionInfo().getRegionName();
     int stores = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index b0165f0..cd62115 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -103,6 +103,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegion
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
@@ -160,6 +162,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActi
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
@@ -1550,6 +1554,34 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
   }
 
+  @Override
+  @QosPriority(priority=HConstants.ADMIN_QOS)
+  public GetRegionLoadResponse getRegionLoad(RpcController controller,
+      GetRegionLoadRequest request) throws ServiceException {
+
+    List<Region> regions;
+    if (request.hasTableName()) {
+      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+      regions = regionServer.getOnlineRegions(tableName);
+    } else {
+      regions = regionServer.getOnlineRegions();
+    }
+    List<RegionLoad> rLoads = new ArrayList<RegionLoad>(regions.size());
+    RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder();
+    RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
+
+    try {
+      for (Region region : regions) {
+        rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, regionSpecifier));
+      }
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
+    builder.addAllRegionLoads(rLoads);
+    return builder.build();
+  }
+
   /**
    * Get some information of the region server.
    *

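The handler above is annotated with ADMIN_QOS priority and reuses
HRegionServer.createRegionLoad, which the previous hunk widened from private
to package-private. A caller-side sketch (not from this patch) using the
ProtobufUtil helper added later in this series; controller, admin and
tableName are assumed to come from an open cluster connection:

  List<org.apache.hadoop.hbase.RegionLoad> loads =
      ProtobufUtil.getRegionLoad(controller, admin, tableName);
  for (org.apache.hadoop.hbase.RegionLoad rl : loads) {
    // getNameAsString() and getStorefileSizeMB() are the fields
    // RegionSizeCalculator consumes below.
    System.out.println(rl.getNameAsString() + ": " + rl.getStorefileSizeMB() + " MB");
  }
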
http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
index 86b06ac..c616a25 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
@@ -19,21 +19,18 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-import java.util.TreeSet;
 
+import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -59,6 +56,7 @@ public class RegionSizeCalculator {
   private final Map<byte[], Long> sizeMap = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
 
   static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable";
+  private static final long MEGABYTE = 1024L * 1024L;
 
   /**
    * Computes size of each region for table and given column families.
@@ -95,38 +93,36 @@ public class RegionSizeCalculator {
 
     LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
 
-    //get regions for table
-    List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
-    Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    for (HRegionLocation regionInfo : tableRegionInfos) {
-      tableRegions.add(regionInfo.getRegionInfo().getRegionName());
-    }
-
-    ClusterStatus clusterStatus = admin.getClusterStatus();
-    Collection<ServerName> servers = clusterStatus.getServers();
-    final long megaByte = 1024L * 1024L;
+    // Get the servers which host regions of the table
+    Set<ServerName> tableServers = getRegionServersOfTable(regionLocator);
 
-    //iterate all cluster regions, filter regions from our table and compute their size
-    for (ServerName serverName: servers) {
-      ServerLoad serverLoad = clusterStatus.getLoad(serverName);
+    for (ServerName tableServerName : tableServers) {
+      Map<byte[], RegionLoad> regionLoads =
+          admin.getRegionLoad(tableServerName, regionLocator.getName());
+      for (RegionLoad regionLoad : regionLoads.values()) {
 
-      for (RegionLoad regionLoad: serverLoad.getRegionsLoad().values()) {
         byte[] regionId = regionLoad.getName();
+        long regionSizeBytes = regionLoad.getStorefileSizeMB() * MEGABYTE;
+        sizeMap.put(regionId, regionSizeBytes);
 
-        if (tableRegions.contains(regionId)) {
-
-          long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
-          sizeMap.put(regionId, regionSizeBytes);
-
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
-          }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
         }
       }
     }
     LOG.debug("Region sizes calculated");
   }
 
+  private Set<ServerName> getRegionServersOfTable(RegionLocator regionLocator)
+      throws IOException {
+
+    Set<ServerName> tableServers = Sets.newHashSet();
+    for (HRegionLocation regionLocation : regionLocator.getAllRegionLocations()) {
+      tableServers.add(regionLocation.getServerName());
+    }
+    return tableServers;
+  }
+
   boolean enabled(Configuration configuration) {
     return configuration.getBoolean(ENABLE_REGIONSIZECALCULATOR, true);
   }

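The net effect of this rewrite: one cluster-wide getClusterStatus() call,
whose payload grows with every region in the cluster, is replaced by one
getRegionLoad RPC per server that actually hosts the table, and each reply
is already filtered to that table. A usage sketch (not from this patch),
assuming a standard client Connection built from conf and a table named "t1":

  try (Connection conn = ConnectionFactory.createConnection(conf);
       RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"));
       Admin admin = conn.getAdmin()) {
    RegionSizeCalculator calc = new RegionSizeCalculator(locator, admin);
    // The size map is keyed by region name; values are bytes
    // (storefile MB * MEGABYTE, as above).
    for (Map.Entry<byte[], Long> entry : calc.getRegionSizeMap().entrySet()) {
      System.out.println(Bytes.toStringBinary(entry.getKey()) + " = "
          + entry.getValue() + " bytes");
    }
  }
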
http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
new file mode 100644
index 0000000..ebd9f94
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
@@ -0,0 +1,135 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.*;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+@Category({MiscTests.class, MediumTests.class})
+public class TestRegionLoad {
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static Admin admin;
+
+  private static final TableName TABLE_1 = TableName.valueOf("table_1");
+  private static final TableName TABLE_2 = TableName.valueOf("table_2");
+  private static final TableName TABLE_3 = TableName.valueOf("table_3");
+  private static final TableName[] tables = new TableName[]{TABLE_1, TABLE_2, TABLE_3};
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    UTIL.startMiniCluster(4);
+    admin = UTIL.getAdmin();
+    admin.setBalancerRunning(false, true);
+    createTables();
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    for (TableName table : tables) {
+      UTIL.deleteTableIfAny(table);
+    }
+    UTIL.shutdownMiniCluster();
+  }
+
+  private static void createTables() throws IOException, InterruptedException {
+    byte[] FAMILY = Bytes.toBytes("f");
+    for (TableName tableName : tables) {
+      Table table = UTIL.createMultiRegionTable(tableName, FAMILY, 16);
+      UTIL.waitTableAvailable(tableName);
+      UTIL.loadTable(table, FAMILY);
+    }
+  }
+
+  @Test
+  public void testRegionLoad() throws Exception {
+
+    // Check if regions match with the regionLoad from the server
+    for (ServerName serverName : admin.getClusterStatus().getServers()) {
+      List<HRegionInfo> regions = admin.getOnlineRegions(serverName);
+      Collection<RegionLoad> regionLoads = admin.getRegionLoad(serverName).values();
+      checkRegionsAndRegionLoads(regions, regionLoads);
+    }
+
+    // Check if regionLoad matches the table's regions and nothing is missed
+    for (TableName table : new TableName[]{TABLE_1, TABLE_2, TABLE_3}) {
+      List<HRegionInfo> tableRegions = admin.getTableRegions(table);
+
+      List<RegionLoad> regionLoads = Lists.newArrayList();
+      for (ServerName serverName : admin.getClusterStatus().getServers()) {
+        regionLoads.addAll(admin.getRegionLoad(serverName, table).values());
+      }
+      checkRegionsAndRegionLoads(tableRegions, regionLoads);
+    }
+
+    // Check RegionLoad matches the regionLoad from ClusterStatus
+    ClusterStatus clusterStatus = admin.getClusterStatus();
+    for (ServerName serverName : clusterStatus.getServers()) {
+      ServerLoad serverLoad = clusterStatus.getLoad(serverName);
+      Map<byte[], RegionLoad> regionLoads = admin.getRegionLoad(serverName);
+      compareRegionLoads(serverLoad.getRegionsLoad(), regionLoads);
+    }
+  }
+
+  private void compareRegionLoads(Map<byte[], RegionLoad> regionLoadCluster,
+      Map<byte[], RegionLoad> regionLoads) {
+
+    assertEquals("No of regionLoads from clusterStatus and regionloads from RS doesn't match",
+        regionLoadCluster.size(), regionLoads.size());
+
+    // The contents of region load from cluster and server should match
+    for (byte[] regionName : regionLoadCluster.keySet()) {
+      regionLoads.remove(regionName);
+    }
+    assertEquals("regionLoads from SN should be empty", 0, regionLoads.size());
+  }
+
+  private void checkRegionsAndRegionLoads(Collection<HRegionInfo> regions,
+      Collection<RegionLoad> regionLoads) {
+
+    assertEquals("No of regions and regionloads doesn't match",
+        regions.size(), regionLoads.size());
+
+    Map<byte[], RegionLoad> regionLoadMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+    for (RegionLoad regionLoad : regionLoads) {
+      regionLoadMap.put(regionLoad.getName(), regionLoad);
+    }
+    for (HRegionInfo info : regions) {
+      assertTrue("Region not in regionLoadMap region:" + info.getRegionNameAsString() +
+          " regionMap: " + regionLoadMap, regionLoadMap.containsKey(info.getRegionName()));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 0237f8d..1594b6d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegion
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
@@ -452,6 +454,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public GetRegionLoadResponse getRegionLoad(RpcController controller,
+      GetRegionLoadRequest request) throws ServiceException {
+    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
+    return builder.build();
+  }
+
+  @Override
   public GetStoreFileResponse getStoreFile(RpcController controller,
       GetStoreFileRequest request) throws ServiceException {
     // TODO Auto-generated method stub

http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
index 8b74112..95d8b4a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
@@ -18,11 +18,9 @@
 package org.apache.hadoop.hbase.util;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -39,8 +37,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT;
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 @Category({MiscTests.class, SmallTests.class})
@@ -48,6 +46,8 @@ public class TestRegionSizeCalculator {
 
   private Configuration configuration = new Configuration();
   private final long megabyte = 1024L * 1024L;
+  private final ServerName sn = ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT,
+      ServerName.NON_STARTCODE);
 
   @Test
   public void testSimpleTestCase() throws Exception {
@@ -55,14 +55,9 @@ public class TestRegionSizeCalculator {
     RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3");
 
     Admin admin = mockAdmin(
-      mockServer(
         mockRegion("region1", 123),
-        mockRegion("region3", 1232)
-      ),
-      mockServer(
-        mockRegion("region2",  54321),
-        mockRegion("otherTableRegion", 110)
-      )
+        mockRegion("region3", 1232),
+        mockRegion("region2",  54321)
     );
 
     RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);
@@ -70,7 +65,7 @@ public class TestRegionSizeCalculator {
     assertEquals(123 * megabyte, calculator.getRegionSize("region1".getBytes()));
     assertEquals(54321 * megabyte, calculator.getRegionSize("region2".getBytes()));
     assertEquals(1232 * megabyte, calculator.getRegionSize("region3".getBytes()));
-    // if region is not inside our table, it should return 0
+    // if the calculator does not know about a region, it should return 0
     assertEquals(0 * megabyte, calculator.getRegionSize("otherTableRegion".getBytes()));
 
     assertEquals(3, calculator.getRegionSizeMap().size());
@@ -87,9 +82,7 @@ public class TestRegionSizeCalculator {
     RegionLocator regionLocator = mockRegionLocator("largeRegion");
 
     Admin admin = mockAdmin(
-      mockServer(
         mockRegion("largeRegion", Integer.MAX_VALUE)
-      )
     );
 
     RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);
@@ -104,9 +97,7 @@ public class TestRegionSizeCalculator {
     RegionLocator table = mockRegionLocator(regionName);
 
     Admin admin = mockAdmin(
-      mockServer(
         mockRegion(regionName, 999)
-      )
     );
 
     //first request on enabled calculator
@@ -133,21 +124,23 @@ public class TestRegionSizeCalculator {
     for (String regionName : regionNames) {
       HRegionInfo info = Mockito.mock(HRegionInfo.class);
       when(info.getRegionName()).thenReturn(regionName.getBytes());
-      regionLocations.add(new HRegionLocation(info, null));//we are not interested in values
+      regionLocations.add(new HRegionLocation(info, sn));
     }
 
     return mockedTable;
   }
 
   /**
-   * Creates mock returning ClusterStatus info about given servers.
+   * Creates mock returning RegionLoad info about given servers.
   */
-  private Admin mockAdmin(ServerLoad... servers) throws Exception {
-    //get clusterstatus
+  private Admin mockAdmin(RegionLoad... regionLoadArray) throws Exception {
     Admin mockAdmin = Mockito.mock(Admin.class);
-    ClusterStatus clusterStatus = mockCluster(servers);
+    Map<byte[], RegionLoad> regionLoads = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
+    for (RegionLoad regionLoad : regionLoadArray) {
+      regionLoads.put(regionLoad.getName(), regionLoad);
+    }
     when(mockAdmin.getConfiguration()).thenReturn(configuration);
-    when(mockAdmin.getClusterStatus()).thenReturn(clusterStatus);
+    when(mockAdmin.getRegionLoad(sn, TableName.valueOf("sizeTestTable"))).thenReturn(regionLoads);
     return mockAdmin;
   }
 
@@ -163,35 +156,4 @@ public class TestRegionSizeCalculator {
     when(region.getStorefileSizeMB()).thenReturn(fileSizeMb);
     return region;
   }
-
-  private ClusterStatus mockCluster(ServerLoad[] servers) {
-    List<ServerName> serverNames = new ArrayList<ServerName>();
-
-    ClusterStatus clusterStatus = Mockito.mock(ClusterStatus.class);
-    when(clusterStatus.getServers()).thenReturn(serverNames);
-
-    int serverCounter = 0;
-    for (ServerLoad server : servers) {
-      ServerName serverName = mock(ServerName.class);
-      when(serverName.getServerName()).thenReturn("server" + (serverCounter++));
-      serverNames.add(serverName);
-      when(clusterStatus.getLoad(serverName)).thenReturn(server);
-    }
-
-    return clusterStatus;
-  }
-
-  /** Creates mock of region server with given regions*/
-  private ServerLoad mockServer(RegionLoad... regions) {
-    ServerLoad serverLoad = Mockito.mock(ServerLoad.class);
-    Map<byte[], RegionLoad> regionMap = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
-
-    for (RegionLoad regionName : regions) {
-      regionMap.put(regionName.getName(), regionName);
-    }
-
-    when(serverLoad.getRegionsLoad()).thenReturn(regionMap);
-    return serverLoad;
-  }
-
 }


[3/3] hbase git commit: HBASE-16169: Make RegionSizeCalculator scalable

Posted by st...@apache.org.
HBASE-16169: Make RegionSizeCalculator scalable

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80acc2dc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80acc2dc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80acc2dc

Branch: refs/heads/master
Commit: 80acc2dca5435f46c683f9eae78861bc766589a6
Parents: 48439e5
Author: thiruvel <th...@yahoo-inc.com>
Authored: Wed Nov 9 17:43:38 2016 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Wed Nov 16 23:07:14 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Admin.java   |   20 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   20 +
 .../hbase/shaded/protobuf/ProtobufUtil.java     |   25 +
 .../hbase/shaded/protobuf/RequestConverter.java |   16 +
 .../shaded/protobuf/generated/AdminProtos.java  | 1678 ++++++++++++++++--
 .../src/main/protobuf/Admin.proto               |   12 +
 .../hbase/regionserver/HRegionServer.java       |    2 +-
 .../hbase/regionserver/RSRpcServices.java       |   32 +
 .../hadoop/hbase/util/RegionSizeCalculator.java |   48 +-
 .../org/apache/hadoop/hbase/TestRegionLoad.java |  135 ++
 .../hadoop/hbase/master/MockRegionServer.java   |    9 +
 .../hbase/util/TestRegionSizeCalculator.java    |   66 +-
 12 files changed, 1867 insertions(+), 196 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 34c00ce..52b935f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
@@ -1021,6 +1022,25 @@ public interface Admin extends Abortable, Closeable {
   ClusterStatus getClusterStatus() throws IOException;
 
   /**
+   * Get {@link RegionLoad} of all regions hosted on a regionserver.
+   *
+   * @param sn the region server whose region load is requested.
+   * @return region load map of all regions hosted on a region server
+   * @throws IOException if a remote or network exception occurs
+   */
+  Map<byte[], RegionLoad> getRegionLoad(ServerName sn) throws IOException;
+
+  /**
+   * Get {@link RegionLoad} of all regions hosted on a regionserver for a table.
+   *
+   * @param sn the region server whose region load is requested.
+   * @param tableName return the load of only those regions belonging to this table
+   * @return region load map of all regions of a table hosted on a region server
+   * @throws IOException if a remote or network exception occurs
+   */
+  Map<byte[], RegionLoad> getRegionLoad(ServerName sn, TableName tableName) throws IOException;
+
+  /**
    * @return Configuration used by the instance.
    */
   Configuration getConfiguration();

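A minimal sketch of the two new Admin calls (not from this patch);
serverName and tableName are assumed to be a live region server and an
existing table:

  // RegionLoad of every region hosted on one region server:
  Map<byte[], RegionLoad> all = admin.getRegionLoad(serverName);

  // RegionLoad of only that server's regions belonging to one table:
  Map<byte[], RegionLoad> scoped = admin.getRegionLoad(serverName, tableName);
  for (RegionLoad rl : scoped.values()) {
    // Same MB-to-bytes conversion RegionSizeCalculator applies.
    long storefileBytes = rl.getStorefileSizeMB() * 1024L * 1024L;
  }
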
http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 4e195d1..00463f38 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -28,6 +28,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.TreeMap;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
@@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
@@ -1880,6 +1882,24 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
+  public Map<byte[], RegionLoad> getRegionLoad(final ServerName sn) throws IOException {
+    return getRegionLoad(sn, null);
+  }
+
+  @Override
+  public Map<byte[], RegionLoad> getRegionLoad(final ServerName sn, final TableName tableName)
+      throws IOException {
+    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+    HBaseRpcController controller = rpcControllerFactory.newController();
+    List<RegionLoad> regionLoads = ProtobufUtil.getRegionLoad(controller, admin, tableName);
+    Map<byte[], RegionLoad> resultMap = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
+    for (RegionLoad regionLoad : regionLoads) {
+      resultMap.put(regionLoad.getName(), regionLoad);
+    }
+    return resultMap;
+  }
+
+  @Override
   public Configuration getConfiguration() {
     return this.conf;
   }

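To cover a whole table rather than one server, a caller aggregates across
servers, as the new TestRegionLoad in part 1 of this series does; a sketch
under the same assumptions:

  List<RegionLoad> tableLoads = new ArrayList<>();
  for (ServerName sn : admin.getClusterStatus().getServers()) {
    tableLoads.addAll(admin.getRegionLoad(sn, tableName).values());
  }
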
http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 2758c26..0c95615 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -112,6 +112,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
@@ -1677,6 +1679,29 @@ public final class ProtobufUtil {
     }
   }
 
+  public static List<org.apache.hadoop.hbase.RegionLoad> getRegionLoad(
+      final RpcController controller, final AdminService.BlockingInterface admin,
+      final TableName tableName) throws IOException {
+    GetRegionLoadRequest request = RequestConverter.buildGetRegionLoadRequest(tableName);
+    GetRegionLoadResponse response;
+    try {
+      response = admin.getRegionLoad(controller, request);
+    } catch (ServiceException se) {
+      throw getRemoteException(se);
+    }
+    return getRegionLoadInfo(response);
+  }
+
+  static List<org.apache.hadoop.hbase.RegionLoad> getRegionLoadInfo(
+      GetRegionLoadResponse regionLoadResponse) {
+    List<org.apache.hadoop.hbase.RegionLoad> regionLoadList =
+        new ArrayList<>(regionLoadResponse.getRegionLoadsCount());
+    for (RegionLoad regionLoad : regionLoadResponse.getRegionLoadsList()) {
+      regionLoadList.add(new org.apache.hadoop.hbase.RegionLoad(regionLoad));
+    }
+    return regionLoadList;
+  }
+
   /**
    * A helper to close a region given a region name
    * using admin protocol.

http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index abd1563..61aa5c7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
@@ -750,6 +752,20 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  /**
+   * Create a protocol buffer GetRegionLoadRequest, either for all regions or
+   * for the regions of one table.
+   *
+   * @param tableName the table whose region load should be obtained from the
+   *          region server; may be null to request the load of all regions
+   * @return a protocol buffer GetRegionLoadRequest
+   */
+  public static GetRegionLoadRequest buildGetRegionLoadRequest(final TableName tableName) {
+    GetRegionLoadRequest.Builder builder = GetRegionLoadRequest.newBuilder();
+    if (tableName != null) {
+      builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
+    }
+    return builder.build();
+  }
+
  /**
   * Create a protocol buffer GetOnlineRegionRequest
   *

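Because the builder only sets table_name when tableName is non-null, passing
null produces a request the server answers with every online region (see
RSRpcServices.getRegionLoad in part 1 of this series). A sketch, not from
this patch:

  GetRegionLoadRequest allRegions = RequestConverter.buildGetRegionLoadRequest(null);
  GetRegionLoadRequest oneTable =
      RequestConverter.buildGetRegionLoadRequest(TableName.valueOf("t1"));
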

[2/3] hbase git commit: HBASE-16169: Make RegionSizeCalculator scalable

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/80acc2dc/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index b4e46b0..6f20fcc 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -24645,6 +24645,1349 @@ public final class AdminProtos {
 
   }
 
+  public interface GetRegionLoadRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.GetRegionLoadRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    boolean hasTableName();
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName();
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetRegionLoadRequest}
+   */
+  public  static final class GetRegionLoadRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.GetRegionLoadRequest)
+      GetRegionLoadRequestOrBuilder {
+    // Use GetRegionLoadRequest.newBuilder() to construct.
+    private GetRegionLoadRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private GetRegionLoadRequest() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetRegionLoadRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = tableName_.toBuilder();
+              }
+              tableName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(tableName_);
+                tableName_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int TABLE_NAME_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_;
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    public boolean hasTableName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
+      return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+    }
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+      return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (hasTableName()) {
+        if (!getTableName().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, getTableName());
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, getTableName());
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest) obj;
+
+      boolean result = true;
+      result = result && (hasTableName() == other.hasTableName());
+      if (hasTableName()) {
+        result = result && getTableName()
+            .equals(other.getTableName());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasTableName()) {
+        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getTableName().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetRegionLoadRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.GetRegionLoadRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getTableNameFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        if (tableNameBuilder_ == null) {
+          tableName_ = null;
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (tableNameBuilder_ == null) {
+          result.tableName_ = tableName_;
+        } else {
+          result.tableName_ = tableNameBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance()) return this;
+        if (other.hasTableName()) {
+          mergeTableName(other.getTableName());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (hasTableName()) {
+          if (!getTableName().isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      public boolean hasTableName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
+        if (tableNameBuilder_ == null) {
+          return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+        } else {
+          return tableNameBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder setTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableName_ = value;
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder setTableName(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tableNameBuilder_ == null) {
+          tableName_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder mergeTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              tableName_ != null &&
+              tableName_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+            tableName_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+          } else {
+            tableName_ = value;
+          }
+          onChanged();
+        } else {
+          tableNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      public Builder clearTableName() {
+        if (tableNameBuilder_ == null) {
+          tableName_ = null;
+          onChanged();
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getTableNameFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+        if (tableNameBuilder_ != null) {
+          return tableNameBuilder_.getMessageOrBuilder();
+        } else {
+          return tableName_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 1;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+          getTableNameFieldBuilder() {
+        if (tableNameBuilder_ == null) {
+          tableNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+                  getTableName(),
+                  getParentForChildren(),
+                  isClean());
+          tableName_ = null;
+        }
+        return tableNameBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetRegionLoadRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetRegionLoadRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetRegionLoadRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<GetRegionLoadRequest>() {
+      public GetRegionLoadRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new GetRegionLoadRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetRegionLoadRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetRegionLoadRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface GetRegionLoadResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.GetRegionLoadResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad>
+        getRegionLoadsList();
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index);
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    int getRegionLoadsCount();
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>
+        getRegionLoadsOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetRegionLoadResponse}
+   */
+  public  static final class GetRegionLoadResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.GetRegionLoadResponse)
+      GetRegionLoadResponseOrBuilder {
+    // Use GetRegionLoadResponse.newBuilder() to construct.
+    private GetRegionLoadResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private GetRegionLoadResponse() {
+      regionLoads_ = java.util.Collections.emptyList();
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetRegionLoadResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                regionLoads_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              regionLoads_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.Builder.class);
+    }
+
+    public static final int REGION_LOADS_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad> regionLoads_;
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad> getRegionLoadsList() {
+      return regionLoads_;
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>
+        getRegionLoadsOrBuilderList() {
+      return regionLoads_;
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    public int getRegionLoadsCount() {
+      return regionLoads_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) {
+      return regionLoads_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
+        int index) {
+      return regionLoads_.get(index);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      for (int i = 0; i < getRegionLoadsCount(); i++) {
+        if (!getRegionLoads(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      for (int i = 0; i < regionLoads_.size(); i++) {
+        output.writeMessage(1, regionLoads_.get(i));
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      for (int i = 0; i < regionLoads_.size(); i++) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, regionLoads_.get(i));
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse) obj;
+
+      boolean result = true;
+      result = result && getRegionLoadsList()
+          .equals(other.getRegionLoadsList());
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getRegionLoadsCount() > 0) {
+        hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER;
+        hash = (53 * hash) + getRegionLoadsList().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetRegionLoadResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.GetRegionLoadResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getRegionLoadsFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        if (regionLoadsBuilder_ == null) {
+          regionLoads_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          regionLoadsBuilder_.clear();
+        }
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_GetRegionLoadResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse(this);
+        int from_bitField0_ = bitField0_;
+        if (regionLoadsBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.regionLoads_ = regionLoads_;
+        } else {
+          result.regionLoads_ = regionLoadsBuilder_.build();
+        }
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance()) return this;
+        if (regionLoadsBuilder_ == null) {
+          if (!other.regionLoads_.isEmpty()) {
+            if (regionLoads_.isEmpty()) {
+              regionLoads_ = other.regionLoads_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureRegionLoadsIsMutable();
+              regionLoads_.addAll(other.regionLoads_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.regionLoads_.isEmpty()) {
+            if (regionLoadsBuilder_.isEmpty()) {
+              regionLoadsBuilder_.dispose();
+              regionLoadsBuilder_ = null;
+              regionLoads_ = other.regionLoads_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              regionLoadsBuilder_ =
+                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
+                   getRegionLoadsFieldBuilder() : null;
+            } else {
+              regionLoadsBuilder_.addAllMessages(other.regionLoads_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getRegionLoadsCount(); i++) {
+          if (!getRegionLoads(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad> regionLoads_ =
+        java.util.Collections.emptyList();
+      private void ensureRegionLoadsIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          regionLoads_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad>(regionLoads_);
+          bitField0_ |= 0x00000001;
+        }
+      }
+
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> regionLoadsBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad> getRegionLoadsList() {
+        if (regionLoadsBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(regionLoads_);
+        } else {
+          return regionLoadsBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public int getRegionLoadsCount() {
+        if (regionLoadsBuilder_ == null) {
+          return regionLoads_.size();
+        } else {
+          return regionLoadsBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) {
+        if (regionLoadsBuilder_ == null) {
+          return regionLoads_.get(index);
+        } else {
+          return regionLoadsBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder setRegionLoads(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
+        if (regionLoadsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureRegionLoadsIsMutable();
+          regionLoads_.set(index, value);
+          onChanged();
+        } else {
+          regionLoadsBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder setRegionLoads(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
+        if (regionLoadsBuilder_ == null) {
+          ensureRegionLoadsIsMutable();
+          regionLoads_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          regionLoadsBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder addRegionLoads(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
+        if (regionLoadsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureRegionLoadsIsMutable();
+          regionLoads_.add(value);
+          onChanged();
+        } else {
+          regionLoadsBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder addRegionLoads(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
+        if (regionLoadsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureRegionLoadsIsMutable();
+          regionLoads_.add(index, value);
+          onChanged();
+        } else {
+          regionLoadsBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder addRegionLoads(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
+        if (regionLoadsBuilder_ == null) {
+          ensureRegionLoadsIsMutable();
+          regionLoads_.add(builderForValue.build());
+          onChanged();
+        } else {
+          regionLoadsBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder addRegionLoads(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
+        if (regionLoadsBuilder_ == null) {
+          ensureRegionLoadsIsMutable();
+          regionLoads_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          regionLoadsBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder addAllRegionLoads(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad> values) {
+        if (regionLoadsBuilder_ == null) {
+          ensureRegionLoadsIsMutable();
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
+              values, regionLoads_);
+          onChanged();
+        } else {
+          regionLoadsBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder clearRegionLoads() {
+        if (regionLoadsBuilder_ == null) {
+          regionLoads_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          regionLoadsBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public Builder removeRegionLoads(int index) {
+        if (regionLoadsBuilder_ == null) {
+          ensureRegionLoadsIsMutable();
+          regionLoads_.remove(index);
+          onChanged();
+        } else {
+          regionLoadsBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder getRegionLoadsBuilder(
+          int index) {
+        return getRegionLoadsFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
+          int index) {
+        if (regionLoadsBuilder_ == null) {
+          return regionLoads_.get(index);
+        } else {
+          return regionLoadsBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>
+           getRegionLoadsOrBuilderList() {
+        if (regionLoadsBuilder_ != null) {
+          return regionLoadsBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(regionLoads_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder() {
+        return getRegionLoadsFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder(
+          int index) {
+        return getRegionLoadsFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder>
+           getRegionLoadsBuilderList() {
+        return getRegionLoadsFieldBuilder().getBuilderList();
+      }
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>
+          getRegionLoadsFieldBuilder() {
+        if (regionLoadsBuilder_ == null) {
+          regionLoadsBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>(
+                  regionLoads_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          regionLoads_ = null;
+        }
+        return regionLoadsBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetRegionLoadResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetRegionLoadResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetRegionLoadResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<GetRegionLoadResponse>() {
+      public GetRegionLoadResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new GetRegionLoadResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetRegionLoadResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetRegionLoadResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
   /**
    * Protobuf service {@code hbase.pb.AdminService}
    */
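
For reviewers: a minimal sketch of how the two new messages compose, assuming only
the shaded generated classes added above. The "default"/"usertable" names are
illustrative, and since table_name is optional, a request that omits it presumably
asks the server for the load of all of its online regions.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

public class GetRegionLoadMessageSketch {
  public static void main(String[] args) throws Exception {
    // TableName is namespace + qualifier bytes in HBase.proto; both are set
    // here so isInitialized() passes ("default"/"usertable" are made up).
    HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("usertable"))
        .build();
    AdminProtos.GetRegionLoadRequest request =
        AdminProtos.GetRegionLoadRequest.newBuilder()
            .setTableName(table)   // optional; omit to cover every region
            .build();
    // Round-trip through the generated parser, as the RPC layer would.
    AdminProtos.GetRegionLoadRequest parsed =
        AdminProtos.GetRegionLoadRequest.parseFrom(request.toByteArray());
    System.out.println(parsed.hasTableName());  // true
  }
}
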
@@ -24797,6 +26140,14 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse> done);
 
+      /**
+       * <code>rpc GetRegionLoad(.hbase.pb.GetRegionLoadRequest) returns (.hbase.pb.GetRegionLoadResponse);</code>
+       */
+      public abstract void getRegionLoad(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse> done);
+
     }
 
     public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService(
@@ -24946,6 +26297,14 @@ public final class AdminProtos {
           impl.updateConfiguration(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void getRegionLoad(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse> done) {
+          impl.getRegionLoad(controller, request, done);
+        }
+
       };
     }
 
@@ -25004,6 +26363,8 @@ public final class AdminProtos {
               return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request);
             case 17:
               return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request);
+            case 18:
+              return impl.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -25054,6 +26415,8 @@ public final class AdminProtos {
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance();
             case 17:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance();
+            case 18:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -25104,6 +26467,8 @@ public final class AdminProtos {
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance();
             case 17:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance();
+            case 18:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -25256,6 +26621,14 @@ public final class AdminProtos {
         org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse> done);
 
+    /**
+     * <code>rpc GetRegionLoad(.hbase.pb.GetRegionLoadRequest) returns (.hbase.pb.GetRegionLoadResponse);</code>
+     */
+    public abstract void getRegionLoad(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse> done);
+
     public static final
         org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -25368,6 +26741,11 @@ public final class AdminProtos {
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse>specializeCallback(
               done));
           return;
+        case 18:
+          this.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -25418,6 +26796,8 @@ public final class AdminProtos {
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance();
         case 17:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance();
+        case 18:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -25468,6 +26848,8 @@ public final class AdminProtos {
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance();
         case 17:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance();
+        case 18:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -25758,6 +27140,21 @@ public final class AdminProtos {
             org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.class,
             org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()));
       }
+
+      public  void getRegionLoad(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(18),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance()));
+      }
     }
 
     public static BlockingInterface newBlockingStub(
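
The non-blocking path for the new method, for contrast: the generated Stub above
routes through channel.callMethod(getDescriptor().getMethods().get(18), ...). A
sketch of a caller follows, with the stub/channel wiring elided; the null
controller is purely illustrative.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;

public class GetRegionLoadAsyncSketch {
  // 'stub' would come from AdminProtos.AdminService.newStub(channel);
  // the callback fires once the channel completes the call.
  static void requestRegionLoad(AdminProtos.AdminService.Stub stub,
      AdminProtos.GetRegionLoadRequest request) {
    stub.getRegionLoad(null, request,
        new RpcCallback<AdminProtos.GetRegionLoadResponse>() {
          public void run(AdminProtos.GetRegionLoadResponse response) {
            System.out.println("regions reported: " + response.getRegionLoadsCount());
          }
        });
  }
}
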
@@ -25855,6 +27252,11 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse getRegionLoad(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
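
And the blocking flavor against the interface added just above, again with the
channel setup elided and a null RpcController passed only for illustration.

import java.util.List;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;

public class GetRegionLoadBlockingSketch {
  // 'admin' would come from AdminProtos.AdminService.newBlockingStub(channel).
  static void printRegionLoads(AdminProtos.AdminService.BlockingInterface admin,
      AdminProtos.GetRegionLoadRequest request) throws ServiceException {
    AdminProtos.GetRegionLoadResponse response = admin.getRegionLoad(null, request);
    List<ClusterStatusProtos.RegionLoad> loads = response.getRegionLoadsList();
    for (ClusterStatusProtos.RegionLoad load : loads) {
      // region_specifier on each RegionLoad carries the encoded region name.
      System.out.println(load.getRegionSpecifier().getValue().toStringUtf8());
    }
  }
}
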
@@ -26079,6 +27481,18 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance());
       }
 
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse getRegionLoad(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(18),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance());
+      }
+
     }
 
     // @@protoc_insertion_point(class_scope:hbase.pb.AdminService)
@@ -26274,6 +27688,16 @@ public final class AdminProtos {
   private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetRegionLoadRequest_descriptor;
+  private static final
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_GetRegionLoadRequest_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_GetRegionLoadResponse_descriptor;
+  private static final
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable;
 
   public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -26283,123 +27707,129 @@ public final class AdminProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\013Admin.proto\022\010hbase.pb\032\013HBase.proto\032\tWA" +
-      "L.proto\"[\n\024GetRegionInfoRequest\022)\n\006regio" +
-      "n\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\030\n\020co" +
-      "mpaction_state\030\002 \001(\010\"\353\001\n\025GetRegionInfoRe" +
-      "sponse\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.R" +
-      "egionInfo\022I\n\020compaction_state\030\002 \001(\0162/.hb" +
-      "ase.pb.GetRegionInfoResponse.CompactionS" +
-      "tate\022\024\n\014isRecovering\030\003 \001(\010\"F\n\017Compaction" +
-      "State\022\010\n\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJOR\020\002\022\023\n" +
-      "\017MAJOR_AND_MINOR\020\003\"P\n\023GetStoreFileReques",
-      "t\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpeci" +
-      "fier\022\016\n\006family\030\002 \003(\014\"*\n\024GetStoreFileResp" +
-      "onse\022\022\n\nstore_file\030\001 \003(\t\"\030\n\026GetOnlineReg" +
-      "ionRequest\"D\n\027GetOnlineRegionResponse\022)\n" +
-      "\013region_info\030\001 \003(\0132\024.hbase.pb.RegionInfo" +
-      "\"\263\002\n\021OpenRegionRequest\022=\n\topen_info\030\001 \003(" +
-      "\0132*.hbase.pb.OpenRegionRequest.RegionOpe" +
-      "nInfo\022\027\n\017serverStartCode\030\002 \001(\004\022\032\n\022master" +
-      "_system_time\030\005 \001(\004\032\251\001\n\016RegionOpenInfo\022$\n" +
-      "\006region\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\037\n\027v",
-      "ersion_of_offline_node\030\002 \001(\r\022+\n\rfavored_" +
-      "nodes\030\003 \003(\0132\024.hbase.pb.ServerName\022#\n\033ope" +
-      "nForDistributedLogReplay\030\004 \001(\010\"\246\001\n\022OpenR" +
-      "egionResponse\022F\n\ropening_state\030\001 \003(\0162/.h" +
-      "base.pb.OpenRegionResponse.RegionOpening" +
-      "State\"H\n\022RegionOpeningState\022\n\n\006OPENED\020\000\022" +
-      "\022\n\016ALREADY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002\"" +
-      "?\n\023WarmupRegionRequest\022(\n\nregionInfo\030\001 \002" +
-      "(\0132\024.hbase.pb.RegionInfo\"\026\n\024WarmupRegion" +
-      "Response\"\313\001\n\022CloseRegionRequest\022)\n\006regio",
-      "n\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\037\n\027ve" +
-      "rsion_of_closing_node\030\002 \001(\r\022\036\n\020transitio" +
-      "n_in_ZK\030\003 \001(\010:\004true\0220\n\022destination_serve" +
-      "r\030\004 \001(\0132\024.hbase.pb.ServerName\022\027\n\017serverS" +
-      "tartCode\030\005 \001(\004\"%\n\023CloseRegionResponse\022\016\n" +
-      "\006closed\030\001 \002(\010\"G\n\032CloseRegionForSplitRequ" +
-      "est\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpe" +
-      "cifier\"-\n\033CloseRegionForSplitResponse\022\016\n" +
-      "\006closed\030\001 \002(\010\"y\n\022FlushRegionRequest\022)\n\006r" +
-      "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\030",
-      "\n\020if_older_than_ts\030\002 \001(\004\022\036\n\026write_flush_" +
-      "wal_marker\030\003 \001(\010\"_\n\023FlushRegionResponse\022" +
-      "\027\n\017last_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(" +
-      "\010\022\036\n\026wrote_flush_wal_marker\030\003 \001(\010\"T\n\022Spl" +
-      "itRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase." +
-      "pb.RegionSpecifier\022\023\n\013split_point\030\002 \001(\014\"" +
-      "\025\n\023SplitRegionResponse\"`\n\024CompactRegionR" +
-      "equest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region" +
-      "Specifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014" +
-      "\"\027\n\025CompactRegionResponse\"\315\001\n\031UpdateFavo",
-      "redNodesRequest\022I\n\013update_info\030\001 \003(\01324.h" +
-      "base.pb.UpdateFavoredNodesRequest.Region" +
-      "UpdateInfo\032e\n\020RegionUpdateInfo\022$\n\006region" +
-      "\030\001 \002(\0132\024.hbase.pb.RegionInfo\022+\n\rfavored_" +
-      "nodes\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032Upd" +
-      "ateFavoredNodesResponse\022\020\n\010response\030\001 \001(" +
-      "\r\"\244\001\n\023MergeRegionsRequest\022+\n\010region_a\030\001 " +
-      "\002(\0132\031.hbase.pb.RegionSpecifier\022+\n\010region" +
-      "_b\030\002 \002(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010f" +
-      "orcible\030\003 \001(\010:\005false\022\032\n\022master_system_ti",
-      "me\030\004 \001(\004\"\026\n\024MergeRegionsResponse\"a\n\010WALE" +
-      "ntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n\017k" +
-      "ey_value_bytes\030\002 \003(\014\022\035\n\025associated_cell_" +
-      "count\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryRequest" +
-      "\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n\024r" +
-      "eplicationClusterId\030\002 \001(\t\022\"\n\032sourceBaseN" +
-      "amespaceDirPath\030\003 \001(\t\022!\n\031sourceHFileArch" +
-      "iveDirPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryRes" +
-      "ponse\"\026\n\024RollWALWriterRequest\"0\n\025RollWAL" +
-      "WriterResponse\022\027\n\017region_to_flush\030\001 \003(\014\"",
-      "#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"\024\n\022" +
-      "StopServerResponse\"\026\n\024GetServerInfoReque" +
-      "st\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132\024." +
-      "hbase.pb.ServerName\022\022\n\nwebui_port\030\002 \001(\r\"" +
-      "B\n\025GetServerInfoResponse\022)\n\013server_info\030" +
-      "\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateCon" +
-      "figurationRequest\"\035\n\033UpdateConfiguration" +
-      "Response2\353\013\n\014AdminService\022P\n\rGetRegionIn" +
-      "fo\022\036.hbase.pb.GetRegionInfoRequest\032\037.hba" +
-      "se.pb.GetRegionInfoResponse\022M\n\014GetStoreF",
-      "ile\022\035.hbase.pb.GetStoreFileRequest\032\036.hba" +
-      "se.pb.GetStoreFileResponse\022V\n\017GetOnlineR" +
-      "egion\022 .hbase.pb.GetOnlineRegionRequest\032" +
-      "!.hbase.pb.GetOnlineRegionResponse\022G\n\nOp" +
-      "enRegion\022\033.hbase.pb.OpenRegionRequest\032\034." +
-      "hbase.pb.OpenRegionResponse\022M\n\014WarmupReg" +
-      "ion\022\035.hbase.pb.WarmupRegionRequest\032\036.hba" +
-      "se.pb.WarmupRegionResponse\022J\n\013CloseRegio" +
-      "n\022\034.hbase.pb.CloseRegionRequest\032\035.hbase." +
-      "pb.CloseRegionResponse\022b\n\023CloseRegionFor",
-      "Split\022$.hbase.pb.CloseRegionForSplitRequ" +
-      "est\032%.hbase.pb.CloseRegionForSplitRespon" +
-      "se\022J\n\013FlushRegion\022\034.hbase.pb.FlushRegion" +
-      "Request\032\035.hbase.pb.FlushRegionResponse\022J" +
-      "\n\013SplitRegion\022\034.hbase.pb.SplitRegionRequ" +
-      "est\032\035.hbase.pb.SplitRegionResponse\022P\n\rCo" +
-      "mpactRegion\022\036.hbase.pb.CompactRegionRequ" +
-      "est\032\037.hbase.pb.CompactRegionResponse\022M\n\014" +
-      "MergeRegions\022\035.hbase.pb.MergeRegionsRequ" +
-      "est\032\036.hbase.pb.MergeRegionsResponse\022\\\n\021R",
-      "eplicateWALEntry\022\".hbase.pb.ReplicateWAL" +
-      "EntryRequest\032#.hbase.pb.ReplicateWALEntr" +
-      "yResponse\022Q\n\006Replay\022\".hbase.pb.Replicate" +
-      "WALEntryRequest\032#.hbase.pb.ReplicateWALE" +
-      "ntryResponse\022P\n\rRollWALWriter\022\036.hbase.pb" +
-      ".RollWALWriterRequest\032\037.hbase.pb.RollWAL" +
-      "WriterResponse\022P\n\rGetServerInfo\022\036.hbase." +
-      "pb.GetServerInfoRequest\032\037.hbase.pb.GetSe" +
-      "rverInfoResponse\022G\n\nStopServer\022\033.hbase.p" +
-      "b.StopServerRequest\032\034.hbase.pb.StopServe",
-      "rResponse\022_\n\022UpdateFavoredNodes\022#.hbase." +
-      "pb.UpdateFavoredNodesRequest\032$.hbase.pb." +
-      "UpdateFavoredNodesResponse\022b\n\023UpdateConf" +
-      "iguration\022$.hbase.pb.UpdateConfiguration" +
-      "Request\032%.hbase.pb.UpdateConfigurationRe" +
-      "sponseBH\n1org.apache.hadoop.hbase.shaded" +
-      ".protobuf.generatedB\013AdminProtosH\001\210\001\001\240\001\001"
+      "\n\013Admin.proto\022\010hbase.pb\032\023ClusterStatus.p" +
+      "roto\032\013HBase.proto\032\tWAL.proto\"[\n\024GetRegio" +
+      "nInfoRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." +
+      "RegionSpecifier\022\030\n\020compaction_state\030\002 \001(" +
+      "\010\"\353\001\n\025GetRegionInfoResponse\022)\n\013region_in" +
+      "fo\030\001 \002(\0132\024.hbase.pb.RegionInfo\022I\n\020compac" +
+      "tion_state\030\002 \001(\0162/.hbase.pb.GetRegionInf" +
+      "oResponse.CompactionState\022\024\n\014isRecoverin" +
+      "g\030\003 \001(\010\"F\n\017CompactionState\022\010\n\004NONE\020\000\022\t\n\005" +
+      "MINOR\020\001\022\t\n\005MAJOR\020\002\022\023\n\017MAJOR_AND_MINOR\020\003\"",
+      "P\n\023GetStoreFileRequest\022)\n\006region\030\001 \002(\0132\031" +
+      ".hbase.pb.RegionSpecifier\022\016\n\006family\030\002 \003(" +
+      "\014\"*\n\024GetStoreFileResponse\022\022\n\nstore_file\030" +
+      "\001 \003(\t\"\030\n\026GetOnlineRegionRequest\"D\n\027GetOn" +
+      "lineRegionResponse\022)\n\013region_info\030\001 \003(\0132" +
+      "\024.hbase.pb.RegionInfo\"\263\002\n\021OpenRegionRequ" +
+      "est\022=\n\topen_info\030\001 \003(\0132*.hbase.pb.OpenRe" +
+      "gionRequest.RegionOpenInfo\022\027\n\017serverStar" +
+      "tCode\030\002 \001(\004\022\032\n\022master_system_time\030\005 \001(\004\032" +
+      "\251\001\n\016RegionOpenInfo\022$\n\006region\030\001 \002(\0132\024.hba",
+      "se.pb.RegionInfo\022\037\n\027version_of_offline_n" +
+      "ode\030\002 \001(\r\022+\n\rfavored_nodes\030\003 \003(\0132\024.hbase" +
+      ".pb.ServerName\022#\n\033openForDistributedLogR" +
+      "eplay\030\004 \001(\010\"\246\001\n\022OpenRegionResponse\022F\n\rop" +
+      "ening_state\030\001 \003(\0162/.hbase.pb.OpenRegionR" +
+      "esponse.RegionOpeningState\"H\n\022RegionOpen" +
+      "ingState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENED\020\001" +
+      "\022\022\n\016FAILED_OPENING\020\002\"?\n\023WarmupRegionRequ" +
+      "est\022(\n\nregionInfo\030\001 \002(\0132\024.hbase.pb.Regio" +
+      "nInfo\"\026\n\024WarmupRegionResponse\"\313\001\n\022CloseR",
+      "egionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." +
+      "RegionSpecifier\022\037\n\027version_of_closing_no" +
+      "de\030\002 \001(\r\022\036\n\020transition_in_ZK\030\003 \001(\010:\004true" +
+      "\0220\n\022destination_server\030\004 \001(\0132\024.hbase.pb." +
+      "ServerName\022\027\n\017serverStartCode\030\005 \001(\004\"%\n\023C" +
+      "loseRegionResponse\022\016\n\006closed\030\001 \002(\010\"G\n\032Cl" +
+      "oseRegionForSplitRequest\022)\n\006region\030\001 \002(\013" +
+      "2\031.hbase.pb.RegionSpecifier\"-\n\033CloseRegi" +
+      "onForSplitResponse\022\016\n\006closed\030\001 \002(\010\"y\n\022Fl" +
+      "ushRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase",
+      ".pb.RegionSpecifier\022\030\n\020if_older_than_ts\030" +
+      "\002 \001(\004\022\036\n\026write_flush_wal_marker\030\003 \001(\010\"_\n" +
+      "\023FlushRegionResponse\022\027\n\017last_flush_time\030" +
+      "\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\022\036\n\026wrote_flush_wa" +
+      "l_marker\030\003 \001(\010\"T\n\022SplitRegionRequest\022)\n\006" +
+      "region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022" +
+      "\023\n\013split_point\030\002 \001(\014\"\025\n\023SplitRegionRespo" +
+      "nse\"`\n\024CompactRegionRequest\022)\n\006region\030\001 " +
+      "\002(\0132\031.hbase.pb.RegionSpecifier\022\r\n\005major\030" +
+      "\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRegionRe",
+      "sponse\"\315\001\n\031UpdateFavoredNodesRequest\022I\n\013" +
+      "update_info\030\001 \003(\01324.hbase.pb.UpdateFavor" +
+      "edNodesRequest.RegionUpdateInfo\032e\n\020Regio" +
+      "nUpdateInfo\022$\n\006region\030\001 \002(\0132\024.hbase.pb.R" +
+      "egionInfo\022+\n\rfavored_nodes\030\002 \003(\0132\024.hbase" +
+      ".pb.ServerName\".\n\032UpdateFavoredNodesResp" +
+      "onse\022\020\n\010response\030\001 \001(\r\"\244\001\n\023MergeRegionsR" +
+      "equest\022+\n\010region_a\030\001 \002(\0132\031.hbase.pb.Regi" +
+      "onSpecifier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb" +
+      ".RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005fals",
+      "e\022\032\n\022master_system_time\030\004 \001(\004\"\026\n\024MergeRe" +
+      "gionsResponse\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020" +
+      ".hbase.pb.WALKey\022\027\n\017key_value_bytes\030\002 \003(" +
+      "\014\022\035\n\025associated_cell_count\030\003 \001(\005\"\242\001\n\030Rep" +
+      "licateWALEntryRequest\022!\n\005entry\030\001 \003(\0132\022.h" +
+      "base.pb.WALEntry\022\034\n\024replicationClusterId" +
+      "\030\002 \001(\t\022\"\n\032sourceBaseNamespaceDirPath\030\003 \001" +
+      "(\t\022!\n\031sourceHFileArchiveDirPath\030\004 \001(\t\"\033\n" +
+      "\031ReplicateWALEntryResponse\"\026\n\024RollWALWri" +
+      "terRequest\"0\n\025RollWALWriterResponse\022\027\n\017r",
+      "egion_to_flush\030\001 \003(\014\"#\n\021StopServerReques" +
+      "t\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerResponse\"" +
+      "\026\n\024GetServerInfoRequest\"K\n\nServerInfo\022)\n" +
+      "\013server_name\030\001 \002(\0132\024.hbase.pb.ServerName" +
+      "\022\022\n\nwebui_port\030\002 \001(\r\"B\n\025GetServerInfoRes" +
+      "ponse\022)\n\013server_info\030\001 \002(\0132\024.hbase.pb.Se" +
+      "rverInfo\"\034\n\032UpdateConfigurationRequest\"\035" +
+      "\n\033UpdateConfigurationResponse\"?\n\024GetRegi" +
+      "onLoadRequest\022\'\n\ntable_name\030\001 \001(\0132\023.hbas" +
+      "e.pb.TableName\"C\n\025GetRegionLoadResponse\022",
+      "*\n\014region_loads\030\001 \003(\0132\024.hbase.pb.RegionL" +
+      "oad2\275\014\n\014AdminService\022P\n\rGetRegionInfo\022\036." +
+      "hbase.pb.GetRegionInfoRequest\032\037.hbase.pb" +
+      ".GetRegionInfoResponse\022M\n\014GetStoreFile\022\035" +
+      ".hbase.pb.GetStoreFileRequest\032\036.hbase.pb" +
+      ".GetStoreFileResponse\022V\n\017GetOnlineRegion" +
+      "\022 .hbase.pb.GetOnlineRegionRequest\032!.hba" +
+      "se.pb.GetOnlineRegionResponse\022G\n\nOpenReg" +
+      "ion\022\033.hbase.pb.OpenRegionRequest\032\034.hbase" +
+      ".pb.OpenRegionResponse\022M\n\014WarmupRegion\022\035",
+      ".hbase.pb.WarmupRegionRequest\032\036.hbase.pb" +
+      ".WarmupRegionResponse\022J\n\013CloseRegion\022\034.h" +
+      "base.pb.CloseRegionRequest\032\035.hbase.pb.Cl" +
+      "oseRegionResponse\022b\n\023CloseRegionForSplit" +
+      "\022$.hbase.pb.CloseRegionForSplitRequest\032%" +
+      ".hbase.pb.CloseRegionForSplitResponse\022J\n" +
+      "\013FlushRegion\022\034.hbase.pb.FlushRegionReque" +
+      "st\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spl" +
+      "itRegion\022\034.hbase.pb.SplitRegionRequest\032\035" +
+      ".hbase.pb.SplitRegionResponse\022P\n\rCompact",
+      "Region\022\036.hbase.pb.CompactRegionRequest\032\037" +
+      ".hbase.pb.CompactRegionResponse\022M\n\014Merge" +
+      "Regions\022\035.hbase.pb.MergeRegionsRequest\032\036" +
+      ".hbase.pb.MergeRegionsResponse\022\\\n\021Replic" +
+      "ateWALEntry\022\".hbase.pb.ReplicateWALEntry" +
+      "Request\032#.hbase.pb.ReplicateWALEntryResp" +
+      "onse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEn" +
+      "tryRequest\032#.hbase.pb.ReplicateWALEntryR" +
+      "esponse\022P\n\rRollWALWriter\022\036.hbase.pb.Roll" +
+      "WALWriterRequest\032\037.hbase.pb.RollWALWrite",
+      "rResponse\022P\n\rGetServerInfo\022\036.hbase.pb.Ge" +
+      "tServerInfoRequest\032\037.hbase.pb.GetServerI" +
+      "nfoResponse\022G\n\nStopServer\022\033.hbase.pb.Sto" +
+      "pServerRequest\032\034.hbase.pb.StopServerResp" +
+      "onse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Up" +
+      "dateFavoredNodesRequest\032$.hbase.pb.Updat" +
+      "eFavoredNodesResponse\022b\n\023UpdateConfigura" +

<TRUNCATED>
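
The regenerated descriptor strings above are opaque, but they decode cleanly:
the new GetRegionLoadRequest/GetRegionLoadResponse messages sit at the end of
the message section, and the AdminService length prefix grows from "2\353\013"
(varint 1515) to "2\275\014" (varint 1597) -- 82 bytes, which is exactly the
encoded size of one extra MethodDescriptorProto ("GetRegionLoad" plus its
request and response type names). Below is a minimal client-side sketch of the
new RPC, assuming the standard proto2 java_generic_services codegen: the stub
method name follows the usual generated BlockingInterface convention, and the
RegionLoadSketch helper class is illustrative, not taken from this patch.

  import com.google.protobuf.ByteString;     // relocated under the shaded prefix on some branches
  import com.google.protobuf.RpcController;
  import com.google.protobuf.ServiceException;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

  public class RegionLoadSketch {
    // Asks one region server for per-region load. table_name is optional in
    // the proto, so passing null requests the load of every region the
    // server currently hosts.
    static AdminProtos.GetRegionLoadResponse fetch(
        AdminProtos.AdminService.BlockingInterface admin,
        RpcController controller, String table) throws ServiceException {
      AdminProtos.GetRegionLoadRequest.Builder req =
          AdminProtos.GetRegionLoadRequest.newBuilder();
      if (table != null) {
        req.setTableName(HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8(table)));
      }
      return admin.getRegionLoad(controller, req.build());
    }

    // Sums storefile sizes across the returned RegionLoad entries; the
    // getter name follows from the storefile_size_MB field declared in
    // ClusterStatus.proto.
    static long totalStorefileSizeMB(AdminProtos.GetRegionLoadResponse resp) {
      long mb = 0;
      for (ClusterStatusProtos.RegionLoad rl : resp.getRegionLoadsList()) {
        mb += rl.getStorefileSizeMB();
      }
      return mb;
    }
  }

This is the shape a caller such as RegionSizeCalculator can lean on: one
GetRegionLoad call per region server, optionally filtered to a table, rather
than pulling a full ClusterStatus for every size calculation.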