Posted to commits@hbase.apache.org by ch...@apache.org on 2018/01/04 05:16:52 UTC

[2/3] hbase git commit: HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all public classes
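
The theme of the change: status-centric names (ClusterStatus, ServerLoad, RegionLoad) give way to the metrics interfaces (ClusterMetrics, ServerMetrics, RegionMetrics) across every public class touched below. A minimal before/after sketch, assuming an Admin handle from an existing Connection (the helper name is illustrative, not part of the patch):

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ClusterMetrics.Option;
    import org.apache.hadoop.hbase.client.Admin;

    final class ClusterSummary {
      // Before: ClusterStatus status = admin.getClusterStatus(options);
      // After, as applied throughout this commit:
      static void print(Admin admin) throws IOException {
        ClusterMetrics metrics = admin.getClusterMetrics(
            EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
        System.out.println("regions=" + metrics.getRegionCount()  // was getRegionsCount()
            + " avgLoad=" + metrics.getAverageLoad()
            + " deadServers=" + metrics.getDeadServerNames().size());
      }
    }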

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
index 301cfef..1227595 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
@@ -17,31 +17,28 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT;
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.when;
-
 @Category({MiscTests.class, SmallTests.class})
 public class TestRegionSizeCalculator {
 
@@ -134,14 +131,15 @@ public class TestRegionSizeCalculator {
   /**
    * Creates mock returning RegionLoad info about given servers.
   */
-  private Admin mockAdmin(RegionLoad... regionLoadArray) throws Exception {
+  private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception {
     Admin mockAdmin = Mockito.mock(Admin.class);
-    Map<byte[], RegionLoad> regionLoads = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for (RegionLoad regionLoad : regionLoadArray) {
-      regionLoads.put(regionLoad.getName(), regionLoad);
+    List<RegionMetrics> regionLoads = new ArrayList<>();
+    for (RegionMetrics regionLoad : regionLoadArray) {
+      regionLoads.add(regionLoad);
     }
     when(mockAdmin.getConfiguration()).thenReturn(configuration);
-    when(mockAdmin.getRegionLoad(sn, TableName.valueOf("sizeTestTable"))).thenReturn(regionLoads);
+    when(mockAdmin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable")))
+        .thenReturn(regionLoads);
     return mockAdmin;
   }
 
@@ -150,11 +148,11 @@ public class TestRegionSizeCalculator {
    *
    * @param  fileSizeMb number of megabytes occupied by region in file store in megabytes
    * */
-  private RegionLoad mockRegion(String regionName, int fileSizeMb) {
-    RegionLoad region = Mockito.mock(RegionLoad.class);
-    when(region.getName()).thenReturn(regionName.getBytes());
+  private RegionMetrics mockRegion(String regionName, int fileSizeMb) {
+    RegionMetrics region = Mockito.mock(RegionMetrics.class);
+    when(region.getRegionName()).thenReturn(regionName.getBytes());
     when(region.getNameAsString()).thenReturn(regionName);
-    when(region.getStorefileSizeMB()).thenReturn(fileSizeMb);
+    when(region.getStoreFileSize()).thenReturn(new Size(fileSizeMb, Size.Unit.MEGABYTE));
     return region;
   }
 }
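
The net effect of this test change, as a standalone sketch: RegionMetrics reports store file size as a unit-carrying Size rather than a raw MB int, so readers convert with Size.get(Unit). This assumes Mockito on the classpath and mirrors the mockRegion() change above rather than adding anything new:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;
    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.Size;

    final class RegionMetricsMocks {
      static RegionMetrics mockRegion(String name, int sizeMb) {
        RegionMetrics region = mock(RegionMetrics.class);
        when(region.getRegionName()).thenReturn(name.getBytes());  // was getName()
        when(region.getStoreFileSize())                            // was getStorefileSizeMB()
            .thenReturn(new Size(sizeMb, Size.Unit.MEGABYTE));
        return region;
      }

      // Reading the value back: Size.get(Unit) returns a double in that unit.
      static long storeFileMb(RegionMetrics region) {
        return (long) region.getStoreFileSize().get(Size.Unit.MEGABYTE);
      }
    }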

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
index 7ee1065..2323bf3 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.Map;
 import javax.ws.rs.GET;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.CacheControl;
@@ -28,11 +29,12 @@ import javax.ws.rs.core.Context;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.UriInfo;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -67,29 +69,35 @@ public class StorageClusterStatusResource extends ResourceBase {
     }
     servlet.getMetrics().incrementRequests(1);
     try {
-      ClusterStatus status = servlet.getAdmin().getClusterStatus(
+      ClusterMetrics status = servlet.getAdmin().getClusterMetrics(
         EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
       StorageClusterStatusModel model = new StorageClusterStatusModel();
-      model.setRegions(status.getRegionsCount());
+      model.setRegions(status.getRegionCount());
       model.setRequests(status.getRequestCount());
       model.setAverageLoad(status.getAverageLoad());
-      for (ServerName info: status.getServers()) {
-        ServerLoad load = status.getLoad(info);
+      for (Map.Entry<ServerName, ServerMetrics> entry: status.getLiveServerMetrics().entrySet()) {
+        ServerName sn = entry.getKey();
+        ServerMetrics load = entry.getValue();
         StorageClusterStatusModel.Node node =
           model.addLiveNode(
-            info.getHostname() + ":" +
-            Integer.toString(info.getPort()),
-            info.getStartcode(), load.getUsedHeapMB(),
-            load.getMaxHeapMB());
-        node.setRequests(load.getNumberOfRequests());
-        for (RegionLoad region: load.getRegionsLoad().values()) {
-          node.addRegion(region.getName(), region.getStores(),
-            region.getStorefiles(), region.getStorefileSizeMB(),
-            region.getMemStoreSizeMB(), region.getStorefileIndexSizeKB(),
-            region.getReadRequestsCount(), region.getWriteRequestsCount(),
-            region.getRootIndexSizeKB(), region.getTotalStaticIndexSizeKB(),
-            region.getTotalStaticBloomSizeKB(), region.getTotalCompactingKVs(),
-            region.getCurrentCompactedKVs());
+            sn.getHostname() + ":" +
+            Integer.toString(sn.getPort()),
+            sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE),
+            (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE));
+        node.setRequests(load.getRequestCount());
+        for (RegionMetrics region: load.getRegionMetrics().values()) {
+          node.addRegion(region.getRegionName(), region.getStoreCount(),
+            region.getStoreFileCount(),
+            (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE),
+            (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
+            (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
+            region.getReadRequestCount(),
+            region.getWriteRequestCount(),
+            (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
+            (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
+            (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE),
+            region.getCompactingCellCount(),
+            region.getCompactedCellCount());
         }
       }
       for (ServerName name: status.getDeadServerNames()) {
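
For readers porting similar REST/UI code, the new iteration shape in isolation: live servers arrive as a Map keyed by ServerName, and every byte-counted field is a Size that the caller narrows to whatever unit the model expects. A sketch (the method name is illustrative):

    import java.util.Map;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.Size;

    final class MetricsWalk {
      static void dump(ClusterMetrics status) {
        for (Map.Entry<ServerName, ServerMetrics> e
            : status.getLiveServerMetrics().entrySet()) {
          ServerMetrics sm = e.getValue();
          long heapMb = (long) sm.getUsedHeapSize().get(Size.Unit.MEGABYTE);
          for (RegionMetrics rm : sm.getRegionMetrics().values()) {
            System.out.println(e.getKey() + " " + rm.getNameAsString()
                + " heapMB=" + heapMb
                + " storeMB=" + (long) rm.getStoreFileSize().get(Size.Unit.MEGABYTE));
          }
        }
      }
    }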

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
index 3ac6566..aa4abea 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -64,7 +64,7 @@ public class StorageClusterVersionResource extends ResourceBase {
     try {
       StorageClusterVersionModel model = new StorageClusterVersionModel();
       model.setVersion(
-        servlet.getAdmin().getClusterStatus(EnumSet.of(Option.HBASE_VERSION))
+        servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION))
             .getHBaseVersion());
       ResponseBuilder response = Response.ok(model);
       response.cacheControl(cacheControl);
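
The same pattern in miniature: scoping the request to a single Option keeps the returned ClusterMetrics (and the underlying RPC payload) small. A sketch assuming an Admin handle:

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics.Option;
    import org.apache.hadoop.hbase.client.Admin;

    final class VersionFetch {
      static String hbaseVersion(Admin admin) throws IOException {
        return admin.getClusterMetrics(EnumSet.of(Option.HBASE_VERSION))
            .getHBaseVersion();
      }
    }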

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index c0278c6..1c70925 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -29,9 +29,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
@@ -73,7 +72,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   private static final Logger LOG = LoggerFactory.getLogger(RSGroupBasedLoadBalancer.class);
 
   private Configuration config;
-  private ClusterStatus clusterStatus;
+  private ClusterMetrics clusterStatus;
   private MasterServices masterServices;
   private volatile RSGroupInfoManager rsGroupInfoManager;
   private LoadBalancer internalBalancer;
@@ -95,8 +94,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   @Override
-  public void setClusterStatus(ClusterStatus st) {
-    this.clusterStatus = st;
+  public void setClusterMetrics(ClusterMetrics sm) {
+    this.clusterStatus = sm;
   }
 
   @Override
@@ -386,7 +385,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
         StochasticLoadBalancer.class, LoadBalancer.class);
     internalBalancer = ReflectionUtils.newInstance(balancerKlass, config);
     internalBalancer.setMasterServices(masterServices);
-    internalBalancer.setClusterStatus(clusterStatus);
+    internalBalancer.setClusterMetrics(clusterStatus);
     internalBalancer.setConf(config);
     internalBalancer.initialize();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 5c8c2aa..1d28233 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -32,14 +32,15 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseCluster;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
@@ -163,15 +164,16 @@ public abstract class TestRSGroupsBase {
   public Map<TableName, Map<ServerName, List<String>>> getTableServerRegionMap()
       throws IOException {
     Map<TableName, Map<ServerName, List<String>>> map = Maps.newTreeMap();
-    ClusterStatus status = TEST_UTIL.getHBaseClusterInterface().getClusterStatus();
-    for(ServerName serverName : status.getServers()) {
-      for(RegionLoad rl : status.getLoad(serverName).getRegionsLoad().values()) {
+    ClusterMetrics status = TEST_UTIL.getHBaseClusterInterface().getClusterMetrics();
+    for (Map.Entry<ServerName, ServerMetrics> entry : status.getLiveServerMetrics().entrySet()) {
+      ServerName serverName = entry.getKey();
+      for(RegionMetrics rl : entry.getValue().getRegionMetrics().values()) {
         TableName tableName = null;
         try {
-          tableName = RegionInfo.getTable(rl.getName());
+          tableName = RegionInfo.getTable(rl.getRegionName());
         } catch (IllegalArgumentException e) {
           LOG.warn("Failed parse a table name from regionname=" +
-              Bytes.toStringBinary(rl.getName()));
+            Bytes.toStringBinary(rl.getRegionName()));
           continue;
         }
         if(!map.containsKey(tableName)) {
@@ -267,11 +269,11 @@ public abstract class TestRSGroupsBase {
 
   // return the real number of region servers, excluding the master embedded region server in 2.0+
   public int getNumServers() throws IOException {
-    ClusterStatus status =
-        admin.getClusterStatus(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS));
-    ServerName master = status.getMaster();
+    ClusterMetrics status =
+        admin.getClusterMetrics(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS));
+    ServerName master = status.getMasterName();
     int count = 0;
-    for (ServerName sn : status.getServers()) {
+    for (ServerName sn : status.getLiveServerMetrics().keySet()) {
       if (!sn.equals(master)) {
         count++;
       }
@@ -494,8 +496,8 @@ public abstract class TestRSGroupsBase {
     }
     //get server which is not a member of new group
     ServerName targetServer = null;
-    for (ServerName server : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
-                                  .getServers()) {
+    for (ServerName server : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+                                  .getLiveServerMetrics().keySet()) {
       if (!newGroup.containsServer(server.getAddress())) {
         targetServer = server;
         break;
@@ -524,7 +526,7 @@ public abstract class TestRSGroupsBase {
         return
             getTableRegionMap().get(tableName) != null &&
                 getTableRegionMap().get(tableName).size() == 6 &&
-                admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
+                admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION))
                      .getRegionStatesInTransition().size() < 1;
       }
     });
@@ -603,13 +605,13 @@ public abstract class TestRSGroupsBase {
           AdminProtos.StopServerRequest.newBuilder().setReason("Die").build());
     } catch(Exception e) {
     }
-    assertFalse(cluster.getClusterStatus().getServers().contains(targetServer));
+    assertFalse(cluster.getClusterMetrics().getLiveServerMetrics().containsKey(targetServer));
 
     //wait for created table to be assigned
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return cluster.getClusterStatus().getRegionStatesInTransition().isEmpty();
+        return cluster.getClusterMetrics().getRegionStatesInTransition().isEmpty();
       }
     });
     Set<Address> newServers = Sets.newHashSet();
@@ -626,7 +628,7 @@ public abstract class TestRSGroupsBase {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return cluster.getClusterStatus().getRegionStatesInTransition().isEmpty();
+        return cluster.getClusterMetrics().getRegionStatesInTransition().isEmpty();
       }
     });
 
@@ -766,7 +768,8 @@ public abstract class TestRSGroupsBase {
 
     //get server which is not a member of new group
     ServerName targetServer = null;
-    for(ServerName server : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
+    for(ServerName server : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+      .getLiveServerMetrics().keySet()) {
       if(!newGroup.containsServer(server.getAddress()) &&
            !rsGroupAdmin.getRSGroupInfo("master").containsServer(server.getAddress())) {
         targetServer = server;
@@ -829,7 +832,7 @@ public abstract class TestRSGroupsBase {
         return getTableRegionMap().get(tableName) != null &&
                 getTableRegionMap().get(tableName).size() == 5 &&
                 getTableServerRegionMap().get(tableName).size() == 1 &&
-                admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
+                admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION))
                      .getRegionStatesInTransition().size() < 1;
       }
     });
@@ -890,11 +893,11 @@ public abstract class TestRSGroupsBase {
       @Override
       public boolean evaluate() throws Exception {
         return !master.getServerManager().areDeadServersInProgress()
-            && cluster.getClusterStatus().getDeadServerNames().size() > 0;
+            && cluster.getClusterMetrics().getDeadServerNames().size() > 0;
       }
     });
-    assertFalse(cluster.getClusterStatus().getServers().contains(targetServer));
-    assertTrue(cluster.getClusterStatus().getDeadServerNames().contains(targetServer));
+    assertFalse(cluster.getClusterMetrics().getLiveServerMetrics().containsKey(targetServer));
+    assertTrue(cluster.getClusterMetrics().getDeadServerNames().contains(targetServer));
     assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
 
     //clear dead servers list
@@ -941,7 +944,7 @@ public abstract class TestRSGroupsBase {
       @Override
       public boolean evaluate() throws Exception {
         return !master.getServerManager().areDeadServersInProgress()
-            && cluster.getClusterStatus().getDeadServerNames().size() > 0;
+            && cluster.getClusterMetrics().getDeadServerNames().size() > 0;
       }
     });
 
@@ -956,15 +959,15 @@ public abstract class TestRSGroupsBase {
     }
     assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
 
-    ServerName sn = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster();
+    ServerName sn = TEST_UTIL.getHBaseClusterInterface().getClusterMetrics().getMasterName();
     TEST_UTIL.getHBaseClusterInterface().stopMaster(sn);
     TEST_UTIL.getHBaseClusterInterface().waitForMasterToStop(sn, 60000);
     TEST_UTIL.getHBaseClusterInterface().startMaster(sn.getHostname(), 0);
     TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster(60000);
 
-    assertEquals(3, cluster.getClusterStatus().getServersSize());
-    assertFalse(cluster.getClusterStatus().getServers().contains(targetServer));
-    assertFalse(cluster.getClusterStatus().getDeadServerNames().contains(targetServer));
+    assertEquals(3, cluster.getClusterMetrics().getLiveServerMetrics().size());
+    assertFalse(cluster.getClusterMetrics().getLiveServerMetrics().containsKey(targetServer));
+    assertFalse(cluster.getClusterMetrics().getDeadServerNames().contains(targetServer));
     assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
 
     rsGroupAdmin.removeServers(Sets.newHashSet(targetServer.getAddress()));
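
The recurring test pattern after the rename, distilled: the master comes from getMasterName() (was getMaster()) and the live servers are the key set of getLiveServerMetrics() (was getServers()). A sketch of the getNumServers() logic above:

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ServerName;

    final class ServerCounts {
      // Counts region servers, excluding the master embedded in 2.0+.
      static int nonMasterServers(ClusterMetrics status) {
        ServerName master = status.getMasterName();
        int count = 0;
        for (ServerName sn : status.getLiveServerMetrics().keySet()) {
          if (!sn.equals(master)) {
            count++;
          }
        }
        return count;
      }
    }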

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
index e1f864e..a49a5fa 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
@@ -54,8 +54,8 @@ MasterAddressTracker masterAddressTracker = master.getMasterAddressTracker();
         <th>Start Time</th>
     </tr>
     <%java>
-    Collection<ServerName> backup_masters = master.getClusterStatusWithoutCoprocessor(
-        EnumSet.of(ClusterMetrics.Option.BACKUP_MASTERS)).getBackupMasters();
+    Collection<ServerName> backup_masters = master.getClusterMetricsWithoutCoprocessor(
+        EnumSet.of(ClusterMetrics.Option.BACKUP_MASTERS)).getBackupMasterNames();
     ServerName [] backupServerNames = backup_masters.toArray(new ServerName[backup_masters.size()]);
     Arrays.sort(backupServerNames);
     for (ServerName serverName : backupServerNames) {
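
An equivalent client-side fetch of what the template renders, for reference; a sketch assuming an Admin handle rather than the master-internal getClusterMetricsWithoutCoprocessor() call used above:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.EnumSet;
    import java.util.List;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    final class BackupMasters {
      static List<ServerName> sorted(Admin admin) throws IOException {
        List<ServerName> backups = new ArrayList<>(admin.getClusterMetrics(
            EnumSet.of(ClusterMetrics.Option.BACKUP_MASTERS)).getBackupMasterNames());
        Collections.sort(backups);  // mirrors the Arrays.sort in the template
        return backups;
      }
    }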

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 6ef5504..a17bc9f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -21,8 +21,7 @@ package org.apache.hadoop.hbase.coprocessor;
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
-
-import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -1263,14 +1262,14 @@ public interface MasterObserver {
   /**
    * Called before get cluster status.
    */
-  default void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
+  default void preGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx)
       throws IOException {}
 
   /**
    * Called after get cluster status.
    */
-  default void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
-      ClusterStatus status) throws IOException {}
+  default void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    ClusterMetrics status) throws IOException {}
 
   /**
    * Called before clear dead region servers.
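
How a downstream coprocessor picks up the renamed hooks, as a sketch (the class name is illustrative; wiring it up as a MasterCoprocessor is omitted). Both methods have default bodies, so only the hooks of interest need overriding:

    import java.io.IOException;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class MetricsAuditObserver implements MasterObserver {
      @Override
      public void preGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx)
          throws IOException {
        // e.g. access checks before the master assembles the metrics
      }

      @Override
      public void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
          ClusterMetrics status) throws IOException {
        // e.g. audit-log status.getRegionCount() after assembly
      }
    }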

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index 85493d1..6b87194 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -35,8 +35,8 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ScheduledChore;
@@ -65,7 +65,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.InternetProtocolFamil
 import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioDatagramChannel;
 import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageEncoder;
 import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
 
 
 /**
@@ -159,12 +158,12 @@ public class ClusterStatusPublisher extends ScheduledChore {
     // We're reusing an existing protobuf message, but we don't send everything.
     // This could be extended in the future, for example if we want to send stuff like the
     //  hbase:meta server name.
-    publisher.publish(new ClusterStatus(ClusterMetricsBuilder.newBuilder()
-        .setHBaseVersion(VersionInfo.getVersion())
-        .setClusterId(master.getMasterFileSystem().getClusterId().toString())
-        .setMasterName(master.getServerName())
-        .setDeadServerNames(sns)
-        .build()));
+    publisher.publish(ClusterMetricsBuilder.newBuilder()
+      .setHBaseVersion(VersionInfo.getVersion())
+      .setClusterId(master.getMasterFileSystem().getClusterId().toString())
+      .setMasterName(master.getServerName())
+      .setDeadServerNames(sns)
+      .build());
   }
 
   protected void cleanup() {
@@ -229,7 +228,7 @@ public class ClusterStatusPublisher extends ScheduledChore {
 
     void connect(Configuration conf) throws IOException;
 
-    void publish(ClusterStatus cs);
+    void publish(ClusterMetrics cs);
 
     @Override
     void close();
@@ -289,7 +288,7 @@ public class ClusterStatusPublisher extends ScheduledChore {
       b.group(group)
         .channelFactory(new HBaseDatagramChannelFactory<Channel>(NioDatagramChannel.class, family))
         .option(ChannelOption.SO_REUSEADDR, true)
-        .handler(new ClusterStatusEncoder(isa));
+        .handler(new ClusterMetricsEncoder(isa));
 
       try {
         channel = (DatagramChannel) b.bind(bindAddress, 0).sync().channel();
@@ -328,24 +327,24 @@ public class ClusterStatusPublisher extends ScheduledChore {
       }
     }
 
-    private static final class ClusterStatusEncoder extends MessageToMessageEncoder<ClusterStatus> {
+    private static final class ClusterMetricsEncoder
+        extends MessageToMessageEncoder<ClusterMetrics> {
       final private InetSocketAddress isa;
 
-      private ClusterStatusEncoder(InetSocketAddress isa) {
+      private ClusterMetricsEncoder(InetSocketAddress isa) {
         this.isa = isa;
       }
 
       @Override
       protected void encode(ChannelHandlerContext channelHandlerContext,
-                            ClusterStatus clusterStatus, List<Object> objects) {
-        ClusterStatusProtos.ClusterStatus csp
-          = ClusterMetricsBuilder.toClusterStatus(clusterStatus);
-        objects.add(new DatagramPacket(Unpooled.wrappedBuffer(csp.toByteArray()), isa));
+        ClusterMetrics clusterStatus, List<Object> objects) {
+        objects.add(new DatagramPacket(Unpooled.wrappedBuffer(
+          ClusterMetricsBuilder.toClusterStatus(clusterStatus).toByteArray()), isa));
       }
     }
 
     @Override
-    public void publish(ClusterStatus cs) {
+    public void publish(ClusterMetrics cs) {
       channel.writeAndFlush(cs).syncUninterruptibly();
     }
 

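With the ClusterStatus wrapper gone, callers hand the builder output straight to publish(). A sketch of assembling the partial ClusterMetrics the publisher sends, setting only the fields shown in the patch:

    import java.util.List;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ClusterMetricsBuilder;
    import org.apache.hadoop.hbase.ServerName;

    final class PartialMetrics {
      static ClusterMetrics forPublish(String version, String clusterId,
          ServerName master, List<ServerName> deadServers) {
        return ClusterMetricsBuilder.newBuilder()
            .setHBaseVersion(version)
            .setClusterId(clusterId)
            .setMasterName(master)
            .setDeadServerNames(deadServers)
            .build();
      }
    }
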
http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index dec1040..945f54d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -56,9 +56,9 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -834,7 +834,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     //initialize load balancer
     this.balancer.setMasterServices(this);
-    this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());
+    this.balancer.setClusterMetrics(getClusterMetricsWithoutCoprocessor());
     this.balancer.initialize();
 
     // Check if master is shutting down because of some issue
@@ -878,7 +878,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.assignmentManager.joinCluster();
 
     // set cluster status again after user regions are assigned
-    this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());
+    this.balancer.setClusterMetrics(getClusterMetricsWithoutCoprocessor());
 
     // Start balancer and meta catalog janitor after meta and regions have been assigned.
     status.setStatus("Starting balancer and catalog janitor");
@@ -1404,7 +1404,7 @@ public class HMaster extends HRegionServer implements MasterServices {
       List<RegionPlan> plans = new ArrayList<>();
 
       //Give the balancer the current cluster state.
-      this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());
+      this.balancer.setClusterMetrics(getClusterMetricsWithoutCoprocessor());
       this.balancer.setClusterLoad(assignmentsByTable);
 
       for (Map<ServerName, List<RegionInfo>> serverMap : assignmentsByTable.values()) {
@@ -2404,11 +2404,11 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
   }
 
-  public ClusterStatus getClusterStatusWithoutCoprocessor() throws InterruptedIOException {
-    return getClusterStatusWithoutCoprocessor(EnumSet.allOf(Option.class));
+  public ClusterMetrics getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
+    return getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
   }
 
-  public ClusterStatus getClusterStatusWithoutCoprocessor(EnumSet<Option> options)
+  public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet<Option> options)
       throws InterruptedIOException {
     ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
     // given that hbase1 can't submit the request with Option,
@@ -2464,23 +2464,23 @@ public class HMaster extends HRegionServer implements MasterServices {
         }
       }
     }
-    return new ClusterStatus(builder.build());
+    return builder.build();
   }
 
   /**
    * @return cluster status
    */
-  public ClusterStatus getClusterStatus() throws IOException {
-    return getClusterStatus(EnumSet.allOf(Option.class));
+  public ClusterMetrics getClusterMetrics() throws IOException {
+    return getClusterMetrics(EnumSet.allOf(Option.class));
   }
 
-  public ClusterStatus getClusterStatus(EnumSet<Option> options) throws IOException {
+  public ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException {
     if (cpHost != null) {
-      cpHost.preGetClusterStatus();
+      cpHost.preGetClusterMetrics();
     }
-    ClusterStatus status = getClusterStatusWithoutCoprocessor(options);
+    ClusterMetrics status = getClusterMetricsWithoutCoprocessor(options);
     if (cpHost != null) {
-      cpHost.postGetClusterStatus(status);
+      cpHost.postGetClusterMetrics(status);
     }
     return status;
   }
@@ -3173,14 +3173,14 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   @Override
   public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
-    return getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
-        .getLastMajorCompactionTsForTable(table);
+    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+        .getLastMajorCompactionTimestamp(table);
   }
 
   @Override
   public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
-    return getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
-        .getLastMajorCompactionTsForRegion(regionName);
+    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+        .getLastMajorCompactionTimestamp(regionName);
   }
 
   /**

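The master-side pattern, distilled: compose the metrics once without coprocessor hooks, then wrap the pre/post coprocessor calls around it. Downstream callers scope what they need; a sketch of the compaction-timestamp lookup as the patch now expresses it (assuming a ClusterMetrics and TableName in hand):

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.TableName;

    final class CompactionTs {
      static long lastMajorCompaction(ClusterMetrics metrics, TableName table) {
        // was getLastMajorCompactionTsForTable(table)
        return metrics.getLastMajorCompactionTimestamp(table);
      }
    }
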
http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 1dad70d..f1a0593 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -18,12 +18,12 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import edu.umd.cs.findbugs.annotations.Nullable;
 import java.util.List;
 import java.util.Map;
-
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
@@ -32,8 +32,6 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import edu.umd.cs.findbugs.annotations.Nullable;
-
 /**
  * Makes decisions about the placement and movement of Regions across
  * RegionServers.
@@ -71,7 +69,7 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse
    * Set the current cluster status.  This allows a LoadBalancer to map host name to a server
    * @param st
    */
-  void setClusterStatus(ClusterStatus st);
+  void setClusterMetrics(ClusterMetrics st);
 
   /**
    * Pass RegionStates and allow balancer to set the current cluster load.
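
For balancer implementors, the renamed hook in isolation; a sketch of how a custom balancer might consume it (the class name is illustrative, and the interface's other methods are omitted for brevity, hence abstract):

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.master.LoadBalancer;

    public abstract class MetricsAwareBalancer implements LoadBalancer {
      private ClusterMetrics clusterMetrics;

      @Override
      public void setClusterMetrics(ClusterMetrics st) {
        // Snapshot kept for the next balance pass; was setClusterStatus(ClusterStatus).
        this.clusterMetrics = st;
      }
    }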

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 9eb2ec8..10e1d0a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -18,14 +18,12 @@
 
 package org.apache.hadoop.hbase.master;
 
+import com.google.protobuf.Service;
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
-
-import com.google.protobuf.Service;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
@@ -1563,20 +1561,20 @@ public class MasterCoprocessorHost
     });
   }
 
-  public void preGetClusterStatus() throws IOException {
+  public void preGetClusterMetrics() throws IOException {
     execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
       @Override
       public void call(MasterObserver observer) throws IOException {
-        observer.preGetClusterStatus(this);
+        observer.preGetClusterMetrics(this);
       }
     });
   }
 
-  public void postGetClusterStatus(ClusterStatus status) throws IOException {
+  public void postGetClusterMetrics(ClusterMetrics status) throws IOException {
     execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
       @Override
       public void call(MasterObserver observer) throws IOException {
-        observer.postGetClusterStatus(this, status);
+        observer.postGetClusterMetrics(this, status);
       }
     });
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 5ada8fd..8f41e4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -30,7 +30,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetricsBuilder;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -905,7 +904,7 @@ public class MasterRpcServices extends RSRpcServices
     try {
       master.checkInitialized();
       response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus(
-        master.getClusterStatus(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
+        master.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList()))));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
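
At the RPC boundary the protobuf message keeps its old ClusterStatus name; ClusterMetricsBuilder bridges the two, as the patch shows. A sketch of that conversion on its own, assuming the generated ClusterStatusProtos classes are on the classpath:

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ClusterMetricsBuilder;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;

    final class PbBridge {
      static byte[] serialize(ClusterMetrics metrics) {
        ClusterStatusProtos.ClusterStatus pb =
            ClusterMetricsBuilder.toClusterStatus(metrics);
        return pb.toByteArray();  // same bytes the publisher datagram carries
      }
    }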

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 96b31c7..68ec524 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -191,265 +191,268 @@ public class RegionPlacementMaintainer {
    * @throws IOException
    */
   private void genAssignmentPlan(TableName tableName,
-      SnapshotOfRegionAssignmentFromMeta assignmentSnapshot,
-      Map<String, Map<String, Float>> regionLocalityMap, FavoredNodesPlan plan,
-      boolean munkresForSecondaryAndTertiary) throws IOException {
-      // Get the all the regions for the current table
-      List<RegionInfo> regions =
-        assignmentSnapshot.getTableToRegionMap().get(tableName);
-      int numRegions = regions.size();
-
-      // Get the current assignment map
-      Map<RegionInfo, ServerName> currentAssignmentMap =
-        assignmentSnapshot.getRegionToRegionServerMap();
-
-      // Get the all the region servers
-      List<ServerName> servers = new ArrayList<>();
-      try (Admin admin = this.connection.getAdmin()) {
-        servers.addAll(admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers());
-      }
-
-      LOG.info("Start to generate assignment plan for " + numRegions +
-          " regions from table " + tableName + " with " +
-          servers.size() + " region servers");
+    SnapshotOfRegionAssignmentFromMeta assignmentSnapshot,
+    Map<String, Map<String, Float>> regionLocalityMap, FavoredNodesPlan plan,
+    boolean munkresForSecondaryAndTertiary) throws IOException {
+    // Get the all the regions for the current table
+    List<RegionInfo> regions =
+      assignmentSnapshot.getTableToRegionMap().get(tableName);
+    int numRegions = regions.size();
+
+    // Get the current assignment map
+    Map<RegionInfo, ServerName> currentAssignmentMap =
+      assignmentSnapshot.getRegionToRegionServerMap();
+
+    // Get the all the region servers
+    List<ServerName> servers = new ArrayList<>();
+    try (Admin admin = this.connection.getAdmin()) {
+      servers.addAll(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+        .getLiveServerMetrics().keySet());
+    }
 
-      int slotsPerServer = (int) Math.ceil((float) numRegions /
-          servers.size());
-      int regionSlots = slotsPerServer * servers.size();
-
-      // Compute the primary, secondary and tertiary costs for each region/server
-      // pair. These costs are based only on node locality and rack locality, and
-      // will be modified later.
-      float[][] primaryCost = new float[numRegions][regionSlots];
-      float[][] secondaryCost = new float[numRegions][regionSlots];
-      float[][] tertiaryCost = new float[numRegions][regionSlots];
-
-      if (this.enforceLocality && regionLocalityMap != null) {
-        // Transform the locality mapping into a 2D array, assuming that any
-        // unspecified locality value is 0.
-        float[][] localityPerServer = new float[numRegions][regionSlots];
-        for (int i = 0; i < numRegions; i++) {
-          Map<String, Float> serverLocalityMap =
-              regionLocalityMap.get(regions.get(i).getEncodedName());
-          if (serverLocalityMap == null) {
+    LOG.info("Start to generate assignment plan for " + numRegions +
+        " regions from table " + tableName + " with " +
+        servers.size() + " region servers");
+
+    int slotsPerServer = (int) Math.ceil((float) numRegions /
+        servers.size());
+    int regionSlots = slotsPerServer * servers.size();
+
+    // Compute the primary, secondary and tertiary costs for each region/server
+    // pair. These costs are based only on node locality and rack locality, and
+    // will be modified later.
+    float[][] primaryCost = new float[numRegions][regionSlots];
+    float[][] secondaryCost = new float[numRegions][regionSlots];
+    float[][] tertiaryCost = new float[numRegions][regionSlots];
+
+    if (this.enforceLocality && regionLocalityMap != null) {
+      // Transform the locality mapping into a 2D array, assuming that any
+      // unspecified locality value is 0.
+      float[][] localityPerServer = new float[numRegions][regionSlots];
+      for (int i = 0; i < numRegions; i++) {
+        Map<String, Float> serverLocalityMap =
+            regionLocalityMap.get(regions.get(i).getEncodedName());
+        if (serverLocalityMap == null) {
+          continue;
+        }
+        for (int j = 0; j < servers.size(); j++) {
+          String serverName = servers.get(j).getHostname();
+          if (serverName == null) {
             continue;
           }
-          for (int j = 0; j < servers.size(); j++) {
-            String serverName = servers.get(j).getHostname();
-            if (serverName == null) {
-              continue;
-            }
-            Float locality = serverLocalityMap.get(serverName);
-            if (locality == null) {
-              continue;
-            }
-            for (int k = 0; k < slotsPerServer; k++) {
-              // If we can't find the locality of a region to a server, which occurs
-              // because locality is only reported for servers which have some
-              // blocks of a region local, then the locality for that pair is 0.
-              localityPerServer[i][j * slotsPerServer + k] = locality.floatValue();
-            }
+          Float locality = serverLocalityMap.get(serverName);
+          if (locality == null) {
+            continue;
+          }
+          for (int k = 0; k < slotsPerServer; k++) {
+            // If we can't find the locality of a region to a server, which occurs
+            // because locality is only reported for servers which have some
+            // blocks of a region local, then the locality for that pair is 0.
+            localityPerServer[i][j * slotsPerServer + k] = locality.floatValue();
           }
         }
+      }
 
-        // Compute the total rack locality for each region in each rack. The total
-        // rack locality is the sum of the localities of a region on all servers in
-        // a rack.
-        Map<String, Map<RegionInfo, Float>> rackRegionLocality = new HashMap<>();
-        for (int i = 0; i < numRegions; i++) {
-          RegionInfo region = regions.get(i);
-          for (int j = 0; j < regionSlots; j += slotsPerServer) {
-            String rack = rackManager.getRack(servers.get(j / slotsPerServer));
-            Map<RegionInfo, Float> rackLocality = rackRegionLocality.get(rack);
-            if (rackLocality == null) {
-              rackLocality = new HashMap<>();
-              rackRegionLocality.put(rack, rackLocality);
-            }
-            Float localityObj = rackLocality.get(region);
-            float locality = localityObj == null ? 0 : localityObj.floatValue();
-            locality += localityPerServer[i][j];
-            rackLocality.put(region, locality);
+      // Compute the total rack locality for each region in each rack. The total
+      // rack locality is the sum of the localities of a region on all servers in
+      // a rack.
+      Map<String, Map<RegionInfo, Float>> rackRegionLocality = new HashMap<>();
+      for (int i = 0; i < numRegions; i++) {
+        RegionInfo region = regions.get(i);
+        for (int j = 0; j < regionSlots; j += slotsPerServer) {
+          String rack = rackManager.getRack(servers.get(j / slotsPerServer));
+          Map<RegionInfo, Float> rackLocality = rackRegionLocality.get(rack);
+          if (rackLocality == null) {
+            rackLocality = new HashMap<>();
+            rackRegionLocality.put(rack, rackLocality);
           }
+          Float localityObj = rackLocality.get(region);
+          float locality = localityObj == null ? 0 : localityObj.floatValue();
+          locality += localityPerServer[i][j];
+          rackLocality.put(region, locality);
         }
-        for (int i = 0; i < numRegions; i++) {
-          for (int j = 0; j < regionSlots; j++) {
-            String rack = rackManager.getRack(servers.get(j / slotsPerServer));
-            Float totalRackLocalityObj =
-                rackRegionLocality.get(rack).get(regions.get(i));
-            float totalRackLocality = totalRackLocalityObj == null ?
-                0 : totalRackLocalityObj.floatValue();
-
-            // Primary cost aims to favor servers with high node locality and low
-            // rack locality, so that secondaries and tertiaries can be chosen for
-            // nodes with high rack locality. This might give primaries with
-            // slightly less locality at first compared to a cost which only
-            // considers the node locality, but should be better in the long run.
-            primaryCost[i][j] = 1 - (2 * localityPerServer[i][j] -
-                totalRackLocality);
-
-            // Secondary cost aims to favor servers with high node locality and high
-            // rack locality since the tertiary will be chosen from the same rack as
-            // the secondary. This could be negative, but that is okay.
-            secondaryCost[i][j] = 2 - (localityPerServer[i][j] + totalRackLocality);
-
-            // Tertiary cost is only concerned with the node locality. It will later
-            // be restricted to only hosts on the same rack as the secondary.
-            tertiaryCost[i][j] = 1 - localityPerServer[i][j];
-          }
+      }
+      for (int i = 0; i < numRegions; i++) {
+        for (int j = 0; j < regionSlots; j++) {
+          String rack = rackManager.getRack(servers.get(j / slotsPerServer));
+          Float totalRackLocalityObj =
+              rackRegionLocality.get(rack).get(regions.get(i));
+          float totalRackLocality = totalRackLocalityObj == null ?
+              0 : totalRackLocalityObj.floatValue();
+
+          // Primary cost aims to favor servers with high node locality and low
+          // rack locality, so that secondaries and tertiaries can be chosen for
+          // nodes with high rack locality. This might give primaries with
+          // slightly less locality at first compared to a cost which only
+          // considers the node locality, but should be better in the long run.
+          primaryCost[i][j] = 1 - (2 * localityPerServer[i][j] -
+              totalRackLocality);
+
+          // Secondary cost aims to favor servers with high node locality and high
+          // rack locality since the tertiary will be chosen from the same rack as
+          // the secondary. This could be negative, but that is okay.
+          secondaryCost[i][j] = 2 - (localityPerServer[i][j] + totalRackLocality);
+
+          // Tertiary cost is only concerned with the node locality. It will later
+          // be restricted to only hosts on the same rack as the secondary.
+          tertiaryCost[i][j] = 1 - localityPerServer[i][j];
         }
       }
+    }
 
-      if (this.enforceMinAssignmentMove && currentAssignmentMap != null) {
-        // We want to minimize the number of regions which move as the result of a
-        // new assignment. Therefore, slightly penalize any placement which is for
-        // a host that is not currently serving the region.
-        for (int i = 0; i < numRegions; i++) {
-          for (int j = 0; j < servers.size(); j++) {
-            ServerName currentAddress = currentAssignmentMap.get(regions.get(i));
-            if (currentAddress != null &&
-                !currentAddress.equals(servers.get(j))) {
-              for (int k = 0; k < slotsPerServer; k++) {
-                primaryCost[i][j * slotsPerServer + k] += NOT_CURRENT_HOST_PENALTY;
-              }
+    if (this.enforceMinAssignmentMove && currentAssignmentMap != null) {
+      // We want to minimize the number of regions which move as the result of a
+      // new assignment. Therefore, slightly penalize any placement which is for
+      // a host that is not currently serving the region.
+      for (int i = 0; i < numRegions; i++) {
+        for (int j = 0; j < servers.size(); j++) {
+          ServerName currentAddress = currentAssignmentMap.get(regions.get(i));
+          if (currentAddress != null &&
+              !currentAddress.equals(servers.get(j))) {
+            for (int k = 0; k < slotsPerServer; k++) {
+              primaryCost[i][j * slotsPerServer + k] += NOT_CURRENT_HOST_PENALTY;
             }
           }
         }
       }
+    }
 
-      // Artificially increase cost of last slot of each server to evenly
-      // distribute the slop, otherwise there will be a few servers with too few
-      // regions and many servers with the max number of regions.
-      for (int i = 0; i < numRegions; i++) {
-        for (int j = 0; j < regionSlots; j += slotsPerServer) {
-          primaryCost[i][j] += LAST_SLOT_COST_PENALTY;
-          secondaryCost[i][j] += LAST_SLOT_COST_PENALTY;
-          tertiaryCost[i][j] += LAST_SLOT_COST_PENALTY;
-        }
+    // Artificially increase cost of last slot of each server to evenly
+    // distribute the slop, otherwise there will be a few servers with too few
+    // regions and many servers with the max number of regions.
+    for (int i = 0; i < numRegions; i++) {
+      for (int j = 0; j < regionSlots; j += slotsPerServer) {
+        primaryCost[i][j] += LAST_SLOT_COST_PENALTY;
+        secondaryCost[i][j] += LAST_SLOT_COST_PENALTY;
+        tertiaryCost[i][j] += LAST_SLOT_COST_PENALTY;
       }
+    }
 
-      RandomizedMatrix randomizedMatrix = new RandomizedMatrix(numRegions,
-          regionSlots);
-      primaryCost = randomizedMatrix.transform(primaryCost);
-      int[] primaryAssignment = new MunkresAssignment(primaryCost).solve();
-      primaryAssignment = randomizedMatrix.invertIndices(primaryAssignment);
-
-      // Modify the secondary and tertiary costs for each region/server pair to
-      // prevent a region from being assigned to the same rack for both primary
-      // and either one of secondary or tertiary.
+    RandomizedMatrix randomizedMatrix = new RandomizedMatrix(numRegions,
+        regionSlots);
+    primaryCost = randomizedMatrix.transform(primaryCost);
+    int[] primaryAssignment = new MunkresAssignment(primaryCost).solve();
+    primaryAssignment = randomizedMatrix.invertIndices(primaryAssignment);
+
+    // Modify the secondary and tertiary costs for each region/server pair to
+    // prevent a region from being assigned to the same rack for both primary
+    // and either one of secondary or tertiary.
+    for (int i = 0; i < numRegions; i++) {
+      int slot = primaryAssignment[i];
+      String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
+      for (int k = 0; k < servers.size(); k++) {
+        if (!rackManager.getRack(servers.get(k)).equals(rack)) {
+          continue;
+        }
+        if (k == slot / slotsPerServer) {
+          // Same node, do not place secondary or tertiary here ever.
+          for (int m = 0; m < slotsPerServer; m++) {
+            secondaryCost[i][k * slotsPerServer + m] = MAX_COST;
+            tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
+          }
+        } else {
+          // Same rack, do not place secondary or tertiary here if possible.
+          for (int m = 0; m < slotsPerServer; m++) {
+            secondaryCost[i][k * slotsPerServer + m] = AVOID_COST;
+            tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
+          }
+        }
+      }
+    }
+    if (munkresForSecondaryAndTertiary) {
+      randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
+      secondaryCost = randomizedMatrix.transform(secondaryCost);
+      int[] secondaryAssignment = new MunkresAssignment(secondaryCost).solve();
+      secondaryAssignment = randomizedMatrix.invertIndices(secondaryAssignment);
+
+      // Modify the tertiary costs for each region/server pair to ensure that a
+      // region is assigned to a tertiary server on the same rack as its secondary
+      // server, but not the same server in that rack.
       for (int i = 0; i < numRegions; i++) {
-        int slot = primaryAssignment[i];
+        int slot = secondaryAssignment[i];
         String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
         for (int k = 0; k < servers.size(); k++) {
-          if (!rackManager.getRack(servers.get(k)).equals(rack)) {
-            continue;
-          }
           if (k == slot / slotsPerServer) {
-            // Same node, do not place secondary or tertiary here ever.
+            // Same node, do not place tertiary here ever.
             for (int m = 0; m < slotsPerServer; m++) {
-              secondaryCost[i][k * slotsPerServer + m] = MAX_COST;
               tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
             }
           } else {
-            // Same rack, do not place secondary or tertiary here if possible.
+            if (rackManager.getRack(servers.get(k)).equals(rack)) {
+              continue;
+            }
+            // Different rack, do not place tertiary here if possible.
             for (int m = 0; m < slotsPerServer; m++) {
-              secondaryCost[i][k * slotsPerServer + m] = AVOID_COST;
               tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
             }
           }
         }
       }
-      if (munkresForSecondaryAndTertiary) {
-        randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
-        secondaryCost = randomizedMatrix.transform(secondaryCost);
-        int[] secondaryAssignment = new MunkresAssignment(secondaryCost).solve();
-        secondaryAssignment = randomizedMatrix.invertIndices(secondaryAssignment);
-
-        // Modify the tertiary costs for each region/server pair to ensure that a
-        // region is assigned to a tertiary server on the same rack as its secondary
-        // server, but not the same server in that rack.
-        for (int i = 0; i < numRegions; i++) {
-          int slot = secondaryAssignment[i];
-          String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
-          for (int k = 0; k < servers.size(); k++) {
-            if (k == slot / slotsPerServer) {
-              // Same node, do not place tertiary here ever.
-              for (int m = 0; m < slotsPerServer; m++) {
-                tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
-              }
-            } else {
-              if (rackManager.getRack(servers.get(k)).equals(rack)) {
-                continue;
-              }
-              // Different rack, do not place tertiary here if possible.
-              for (int m = 0; m < slotsPerServer; m++) {
-                tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
-              }
-            }
-          }
-        }
 
-        randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
-        tertiaryCost = randomizedMatrix.transform(tertiaryCost);
-        int[] tertiaryAssignment = new MunkresAssignment(tertiaryCost).solve();
-        tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment);
-
-        for (int i = 0; i < numRegions; i++) {
-          List<ServerName> favoredServers = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
-          ServerName s = servers.get(primaryAssignment[i] / slotsPerServer);
-          favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
-              ServerName.NON_STARTCODE));
-
-          s = servers.get(secondaryAssignment[i] / slotsPerServer);
-          favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
-              ServerName.NON_STARTCODE));
-
-          s = servers.get(tertiaryAssignment[i] / slotsPerServer);
-          favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
-              ServerName.NON_STARTCODE));
-          // Update the assignment plan
-          plan.updateFavoredNodesMap(regions.get(i), favoredServers);
-        }
-        LOG.info("Generated the assignment plan for " + numRegions +
-            " regions from table " + tableName + " with " +
-            servers.size() + " region servers");
-        LOG.info("Assignment plan for secondary and tertiary generated " +
-            "using MunkresAssignment");
-      } else {
-        Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>();
-        for (int i = 0; i < numRegions; i++) {
-          primaryRSMap.put(regions.get(i), servers.get(primaryAssignment[i] / slotsPerServer));
-        }
-        FavoredNodeAssignmentHelper favoredNodeHelper =
-            new FavoredNodeAssignmentHelper(servers, conf);
-        favoredNodeHelper.initialize();
-        Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
-            favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap);
-        for (int i = 0; i < numRegions; i++) {
-          List<ServerName> favoredServers = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
-          RegionInfo currentRegion = regions.get(i);
-          ServerName s = primaryRSMap.get(currentRegion);
-          favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
-              ServerName.NON_STARTCODE));
-
-          ServerName[] secondaryAndTertiary =
-              secondaryAndTertiaryMap.get(currentRegion);
-          s = secondaryAndTertiary[0];
-          favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
-              ServerName.NON_STARTCODE));
-
-          s = secondaryAndTertiary[1];
-          favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
-              ServerName.NON_STARTCODE));
-          // Update the assignment plan
-          plan.updateFavoredNodesMap(regions.get(i), favoredServers);
-        }
-        LOG.info("Generated the assignment plan for " + numRegions +
-            " regions from table " + tableName + " with " +
-            servers.size() + " region servers");
-        LOG.info("Assignment plan for secondary and tertiary generated " +
-            "using placeSecondaryAndTertiaryWithRestrictions method");
+      randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
+      tertiaryCost = randomizedMatrix.transform(tertiaryCost);
+      int[] tertiaryAssignment = new MunkresAssignment(tertiaryCost).solve();
+      tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment);
+
+      for (int i = 0; i < numRegions; i++) {
+        List<ServerName> favoredServers
+          = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
+        ServerName s = servers.get(primaryAssignment[i] / slotsPerServer);
+        favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
+            ServerName.NON_STARTCODE));
+
+        s = servers.get(secondaryAssignment[i] / slotsPerServer);
+        favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
+            ServerName.NON_STARTCODE));
+
+        s = servers.get(tertiaryAssignment[i] / slotsPerServer);
+        favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
+            ServerName.NON_STARTCODE));
+        // Update the assignment plan
+        plan.updateFavoredNodesMap(regions.get(i), favoredServers);
+      }
+      LOG.info("Generated the assignment plan for " + numRegions +
+          " regions from table " + tableName + " with " +
+          servers.size() + " region servers");
+      LOG.info("Assignment plan for secondary and tertiary generated " +
+          "using MunkresAssignment");
+    } else {
+      Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>();
+      for (int i = 0; i < numRegions; i++) {
+        primaryRSMap.put(regions.get(i), servers.get(primaryAssignment[i] / slotsPerServer));
+      }
+      FavoredNodeAssignmentHelper favoredNodeHelper =
+          new FavoredNodeAssignmentHelper(servers, conf);
+      favoredNodeHelper.initialize();
+      Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
+          favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap);
+      for (int i = 0; i < numRegions; i++) {
+        List<ServerName> favoredServers
+          = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
+        RegionInfo currentRegion = regions.get(i);
+        ServerName s = primaryRSMap.get(currentRegion);
+        favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
+            ServerName.NON_STARTCODE));
+
+        ServerName[] secondaryAndTertiary =
+            secondaryAndTertiaryMap.get(currentRegion);
+        s = secondaryAndTertiary[0];
+        favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
+            ServerName.NON_STARTCODE));
+
+        s = secondaryAndTertiary[1];
+        favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(),
+            ServerName.NON_STARTCODE));
+        // Update the assignment plan
+        plan.updateFavoredNodesMap(regions.get(i), favoredServers);
       }
+      LOG.info("Generated the assignment plan for " + numRegions +
+          " regions from table " + tableName + " with " +
+          servers.size() + " region servers");
+      LOG.info("Assignment plan for secondary and tertiary generated " +
+          "using placeSecondaryAndTertiaryWithRestrictions method");
     }
+  }
 
   public FavoredNodesPlan getNewAssignmentPlan() throws IOException {
     // Get the current region assignment snapshot by scanning from the META
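
A quick aside for reviewers of the cost-matrix hunks above: every server owns slotsPerServer contiguous columns of the matrix, so a solved assignment slot maps back to its server by integer division (slot / slotsPerServer), which is why that expression recurs throughout. A minimal, self-contained sketch of the arithmetic; the names and values are illustrative only, not part of the patch:

// Illustrative only: maps solved Munkres assignment slots back to server indices.
public final class SlotArithmeticDemo {
  public static void main(String[] args) {
    int slotsPerServer = 4;                 // columns contributed per region server
    int[] primaryAssignment = {0, 5, 11};   // solved slot per region
    for (int i = 0; i < primaryAssignment.length; i++) {
      int slot = primaryAssignment[i];
      int serverIndex = slot / slotsPerServer;  // owning server's index
      System.out.println("region " + i + ": slot " + slot
          + " -> server " + serverIndex);
    }
  }
}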

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java
index 78c460b..843a36e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java
@@ -18,13 +18,16 @@
 
 package org.apache.hadoop.hbase.master.balancer;
 
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.Size;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
 /**
  * Wrapper class for the few fields required by the {@link StochasticLoadBalancer}
- * from the full {@link RegionLoad}.
+ * from the full {@link RegionMetrics}.
  */
+@InterfaceAudience.Private
 @InterfaceStability.Evolving
 class BalancerRegionLoad {
   private final long readRequestsCount;
@@ -32,11 +35,11 @@ class BalancerRegionLoad {
   private final int memStoreSizeMB;
   private final int storefileSizeMB;
 
-  BalancerRegionLoad(RegionLoad regionLoad) {
-    readRequestsCount = regionLoad.getReadRequestsCount();
-    writeRequestsCount = regionLoad.getWriteRequestsCount();
-    memStoreSizeMB = regionLoad.getMemStoreSizeMB();
-    storefileSizeMB = regionLoad.getStorefileSizeMB();
+  BalancerRegionLoad(RegionMetrics regionMetrics) {
+    readRequestsCount = regionMetrics.getReadRequestCount();
+    writeRequestsCount = regionMetrics.getWriteRequestCount();
+    memStoreSizeMB = (int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE);
+    storefileSizeMB = (int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
   }
 
   public long getReadRequestsCount() {
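
Note the unit handling in the new constructor: RegionMetrics reports sizes as Size values instead of raw megabyte ints, so callers now convert explicitly, as BalancerRegionLoad does above. A minimal sketch of the Size conversion (the values are made up):

import org.apache.hadoop.hbase.Size;

public final class SizeConversionDemo {
  public static void main(String[] args) {
    Size memStore = new Size(1536, Size.Unit.MEGABYTE);
    int mb = (int) memStore.get(Size.Unit.MEGABYTE);  // 1536
    double gb = memStore.get(Size.Unit.GIGABYTE);     // 1.5
    System.out.println(mb + " MB = " + gb + " GB");
  }
}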

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 750458b..a8dd9ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -36,7 +36,7 @@ import java.util.stream.Collectors;
 
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -1006,7 +1006,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   private static final Random RANDOM = new Random(System.currentTimeMillis());
   private static final Logger LOG = LoggerFactory.getLogger(BaseLoadBalancer.class);
   protected MetricsBalancer metricsBalancer = null;
-  protected ClusterStatus clusterStatus = null;
+  protected ClusterMetrics clusterStatus = null;
   protected ServerName masterServerName;
   protected MasterServices services;
   protected boolean tablesOnMaster;
@@ -1128,10 +1128,10 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   }
 
   @Override
-  public synchronized void setClusterStatus(ClusterStatus st) {
+  public synchronized void setClusterMetrics(ClusterMetrics st) {
     this.clusterStatus = st;
     if (useRegionFinder) {
-      regionFinder.setClusterStatus(st);
+      regionFinder.setClusterMetrics(st);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
index 427322d..c0383fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
@@ -46,7 +46,7 @@ public class ClusterStatusChore extends ScheduledChore {
   @Override
   protected void chore() {
     try {
-      balancer.setClusterStatus(master.getClusterStatusWithoutCoprocessor());
+      balancer.setClusterMetrics(master.getClusterMetricsWithoutCoprocessor());
     } catch (InterruptedIOException e) {
       LOG.warn("Ignoring interruption", e);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
index e1d878d..a9b1bb7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
@@ -27,9 +27,8 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -64,7 +63,7 @@ class RegionLocationFinder {
   private static final long CACHE_TIME = 240 * 60 * 1000;
   private static final HDFSBlocksDistribution EMPTY_BLOCK_DISTRIBUTION = new HDFSBlocksDistribution();
   private Configuration conf;
-  private volatile ClusterStatus status;
+  private volatile ClusterMetrics status;
   private MasterServices services;
   private final ListeningExecutorService executor;
   // Do not scheduleFullRefresh at master startup
@@ -105,7 +104,6 @@ class RegionLocationFinder {
 
   /**
    * Create a cache for region to list of servers
-   * @param time time to cache the locations
    * @return A new Cache.
    */
   private LoadingCache<RegionInfo, HDFSBlocksDistribution> createCache() {
@@ -126,7 +124,7 @@ class RegionLocationFinder {
     this.services = services;
   }
 
-  public void setClusterStatus(ClusterStatus status) {
+  public void setClusterMetrics(ClusterMetrics status) {
     long currentTime = EnvironmentEdgeManager.currentTime();
     this.status = status;
     if (currentTime > lastFullRefresh + (CACHE_TIME / 2)) {
@@ -244,7 +242,7 @@ class RegionLocationFinder {
     }
 
     List<ServerName> topServerNames = new ArrayList<>();
-    Collection<ServerName> regionServers = status.getServers();
+    Collection<ServerName> regionServers = status.getLiveServerMetrics().keySet();
 
     // create a mapping from hostname to ServerName for fast lookup
     HashMap<String, List<ServerName>> hostToServerName = new HashMap<>();
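
getLiveServerMetrics() returns a Map keyed by ServerName, so the old getServers() collection becomes a keySet() view and per-server details ride along in the values. A hedged sketch of iterating that map; the helper below is hypothetical, not from the patch:

import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;

final class LiveServerListingDemo {
  // Hypothetical helper: the caller supplies a ClusterMetrics snapshot.
  static void printRegionCounts(ClusterMetrics metrics) {
    for (Map.Entry<ServerName, ServerMetrics> e
        : metrics.getLiveServerMetrics().entrySet()) {
      System.out.println(e.getKey().getServerName() + " hosts "
          + e.getValue().getRegionMetrics().size() + " regions");
    }
  }
}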

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 693d8b2..6b4f943 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -27,14 +27,13 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -226,11 +225,11 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
   }
 
   @Override
-  public synchronized void setClusterStatus(ClusterStatus st) {
-    super.setClusterStatus(st);
+  public synchronized void setClusterMetrics(ClusterMetrics st) {
+    super.setClusterMetrics(st);
     updateRegionLoad();
     for(CostFromRegionLoadFunction cost : regionLoadFunctions) {
-      cost.setClusterStatus(st);
+      cost.setClusterMetrics(st);
     }
 
     // update metrics size
@@ -527,23 +526,19 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
     Map<String, Deque<BalancerRegionLoad>> oldLoads = loads;
     loads = new HashMap<>();
 
-    for (ServerName sn : clusterStatus.getServers()) {
-      ServerLoad sl = clusterStatus.getLoad(sn);
-      if (sl == null) {
-        continue;
-      }
-      for (Entry<byte[], RegionLoad> entry : sl.getRegionsLoad().entrySet()) {
-        Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(entry.getKey()));
+    clusterStatus.getLiveServerMetrics().forEach((ServerName sn, ServerMetrics sm) -> {
+      sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
+        Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
         if (rLoads == null) {
           // There was nothing there
           rLoads = new ArrayDeque<>();
         } else if (rLoads.size() >= numRegionLoadsToRemember) {
           rLoads.remove();
         }
-        rLoads.add(new BalancerRegionLoad(entry.getValue()));
-        loads.put(Bytes.toString(entry.getKey()), rLoads);
-      }
-    }
+        rLoads.add(new BalancerRegionLoad(rm));
+        loads.put(Bytes.toString(regionName), rLoads);
+      });
+    });
 
     for(CostFromRegionLoadFunction cost : regionLoadFunctions) {
       cost.setLoads(loads);
@@ -1371,14 +1366,14 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
    */
   abstract static class CostFromRegionLoadFunction extends CostFunction {
 
-    private ClusterStatus clusterStatus = null;
+    private ClusterMetrics clusterStatus = null;
     private Map<String, Deque<BalancerRegionLoad>> loads = null;
     private double[] stats = null;
     CostFromRegionLoadFunction(Configuration conf) {
       super(conf);
     }
 
-    void setClusterStatus(ClusterStatus status) {
+    void setClusterMetrics(ClusterMetrics status) {
       this.clusterStatus = status;
     }
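
The updateRegionLoad() rewrite above keeps, per region, a bounded Deque of recent BalancerRegionLoad samples, evicting the oldest once numRegionLoadsToRemember is reached. A self-contained sketch of that eviction pattern, with plain integers standing in for the load objects:

import java.util.ArrayDeque;
import java.util.Deque;

public final class BoundedHistoryDemo {
  public static void main(String[] args) {
    int limit = 3; // analogous to numRegionLoadsToRemember above
    Deque<Integer> history = new ArrayDeque<>();
    for (int sample = 1; sample <= 5; sample++) {
      if (history.size() >= limit) {
        history.remove();        // drop the oldest sample from the head
      }
      history.add(sample);       // append the newest at the tail
    }
    System.out.println(history); // prints [3, 4, 5]
  }
}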
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 4110dfd..602af91 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -22,7 +22,6 @@ import com.google.protobuf.Message;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
-
 import java.io.IOException;
 import java.net.InetAddress;
 import java.security.PrivilegedExceptionAction;
@@ -38,7 +37,6 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index a012a4e..31208c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -51,8 +51,8 @@ import org.apache.commons.lang3.time.StopWatch;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -1168,8 +1168,7 @@ public final class Canary implements Tool {
     private void checkWriteTableDistribution() throws IOException {
       if (!admin.tableExists(writeTableName)) {
         int numberOfServers =
-            admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()
-                .size();
+            admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().size();
         if (numberOfServers == 0) {
           throw new IllegalStateException("No live regionservers");
         }
@@ -1180,10 +1179,10 @@ public final class Canary implements Tool {
         admin.enableTable(writeTableName);
       }
 
-      ClusterStatus status =
-          admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER));
-      int numberOfServers = status.getServersSize();
-      if (status.getServers().contains(status.getMaster())) {
+      ClusterMetrics status =
+          admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER));
+      int numberOfServers = status.getLiveServerMetrics().size();
+      if (status.getLiveServerMetrics().containsKey(status.getMasterName())) {
         numberOfServers -= 1;
       }
 
@@ -1502,8 +1501,8 @@ public final class Canary implements Tool {
         }
 
         // get any live regionservers not serving any regions
-        for (ServerName rs : this.admin
-            .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
+        for (ServerName rs : this.admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+          .getLiveServerMetrics().keySet()) {
           String rsName = rs.getHostname();
           if (!rsAndRMap.containsKey(rsName)) {
             rsAndRMap.put(rsName, Collections.<RegionInfo> emptyList());
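
The Canary hunks above also show the map-based replacement for "does the master appear among the live servers": a containsKey() check against the live-server map. A hedged sketch of that pattern as a standalone helper (the method below is hypothetical):

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.client.Admin;

final class CanaryStyleCountDemo {
  // Hypothetical helper: count live region servers, excluding a master
  // that also reports itself among the live servers.
  static int regionServerCount(Admin admin) throws IOException {
    ClusterMetrics status =
        admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER));
    int count = status.getLiveServerMetrics().size();
    if (status.getLiveServerMetrics().containsKey(status.getMasterName())) {
      count -= 1; // the master hosts regions too; do not double-count it
    }
    return count;
  }
}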

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 42faa9f..5ca1ed6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -72,8 +72,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
@@ -224,7 +224,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * Internal resources
    **********************/
   private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());
-  private ClusterStatus status;
+  private ClusterMetrics status;
   private ClusterConnection connection;
   private Admin admin;
   private Table meta;
@@ -525,7 +525,7 @@ public class HBaseFsck extends Configured implements Closeable {
     connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());
     admin = connection.getAdmin();
     meta = connection.getTable(TableName.META_TABLE_NAME);
-    status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS,
+    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,
       Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,
       Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));
   }
@@ -535,7 +535,7 @@ public class HBaseFsck extends Configured implements Closeable {
    */
   private void loadDeployedRegions() throws IOException, InterruptedException {
     // From the master, get a list of all known live region servers
-    Collection<ServerName> regionServers = status.getServers();
+    Collection<ServerName> regionServers = status.getLiveServerMetrics().keySet();
     errors.print("Number of live region servers: " + regionServers.size());
     if (details) {
       for (ServerName rsinfo: regionServers) {
@@ -553,10 +553,10 @@ public class HBaseFsck extends Configured implements Closeable {
     }
 
     // Print the current master name and state
-    errors.print("Master: " + status.getMaster());
+    errors.print("Master: " + status.getMasterName());
 
     // Print the list of all backup masters
-    Collection<ServerName> backupMasters = status.getBackupMasters();
+    Collection<ServerName> backupMasters = status.getBackupMasterNames();
     errors.print("Number of backup masters: " + backupMasters.size());
     if (details) {
       for (ServerName name: backupMasters) {
@@ -566,7 +566,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
     errors.print("Average load: " + status.getAverageLoad());
     errors.print("Number of requests: " + status.getRequestCount());
-    errors.print("Number of regions: " + status.getRegionsCount());
+    errors.print("Number of regions: " + status.getRegionCount());
 
     List<RegionState> rits = status.getRegionStatesInTransition();
     errors.print("Number of regions in transition: " + rits.size());
@@ -2451,7 +2451,8 @@ public class HBaseFsck extends Configured implements Closeable {
         LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI());
         int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication();
         HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
-            admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers(), numReplicas);
+            admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+              .getLiveServerMetrics().keySet(), numReplicas);
 
         tryAssignmentRepair(hbi, "Trying to reassign region...");
       }
@@ -2478,7 +2479,8 @@ public class HBaseFsck extends Configured implements Closeable {
         LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI());
         int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication();
         HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
-            admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers(), numReplicas);
+            admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
+              .getLiveServerMetrics().keySet(), numReplicas);
         tryAssignmentRepair(hbi, "Trying to fix unassigned region...");
       }
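
For completeness, a hedged end-to-end sketch of the renamed summary accessors HBaseFsck now prints (getMaster()/getBackupMasters() become getMasterName()/getBackupMasterNames()); this assumes a reachable cluster in the local configuration:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ClusterSummaryDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics status = admin.getClusterMetrics(EnumSet.of(
          Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS));
      System.out.println("Master: " + status.getMasterName());
      System.out.println("Backup masters: " + status.getBackupMasterNames().size());
      System.out.println("Live region servers: "
          + status.getLiveServerMetrics().size());
    }
  }
}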
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 64f2766..86cb921 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -118,7 +118,7 @@ public class HBaseFsckRepair {
     while (EnvironmentEdgeManager.currentTime() < expiration) {
       try {
         boolean inTransition = false;
-        for (RegionState rs : admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
+        for (RegionState rs : admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION))
                                    .getRegionStatesInTransition()) {
           if (RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0) {
             inTransition = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index 75c7dd5..16c256a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -747,7 +747,7 @@ public class RegionMover extends AbstractHBaseTool {
    * @throws IOException
    */
   private void stripMaster(ArrayList<String> regionServers, Admin admin) throws IOException {
-    ServerName master = admin.getClusterStatus(EnumSet.of(Option.MASTER)).getMaster();
+    ServerName master = admin.getClusterMetrics(EnumSet.of(Option.MASTER)).getMasterName();
     String masterHostname = master.getHostname();
     int masterPort = master.getPort();
     try {
@@ -825,7 +825,7 @@ public class RegionMover extends AbstractHBaseTool {
    */
   private ArrayList<String> getServers(Admin admin) throws IOException {
     ArrayList<ServerName> serverInfo = new ArrayList<>(
-        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers());
+        admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet());
     ArrayList<String> regionServers = new ArrayList<>(serverInfo.size());
     for (ServerName server : serverInfo) {
       regionServers.add(server.getServerName().toLowerCase());

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 2902158..7b9cbb6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -41,8 +41,8 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -437,8 +437,8 @@ public class RegionSplitter {
    */
   private static int getRegionServerCount(final Connection connection) throws IOException {
     try (Admin admin = connection.getAdmin()) {
-      ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
-      Collection<ServerName> servers = status.getServers();
+      ClusterMetrics status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
+      Collection<ServerName> servers = status.getLiveServerMetrics().keySet();
       return servers == null || servers.isEmpty()? 0: servers.size();
     }
   }