Posted to commits@hbase.apache.org by zh...@apache.org on 2017/12/22 13:45:14 UTC

[16/26] hbase git commit: HBASE-19496 Reusing the ByteBuffer in rpc layer corrupts the ServerLoad and RegionLoad

HBASE-19496 Reusing the ByteBuffer in rpc layer corrupts the ServerLoad and RegionLoad


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/448ba3a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/448ba3a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/448ba3a7

Branch: refs/heads/HBASE-19397
Commit: 448ba3a78f50df2ffac874c3768e9f50d52b15f6
Parents: 7501e64
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Thu Dec 21 20:46:57 2017 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Fri Dec 22 18:54:25 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/ClusterMetrics.java | 198 ++++++++
 .../hadoop/hbase/ClusterMetricsBuilder.java     | 397 ++++++++++++++++
 .../org/apache/hadoop/hbase/ClusterStatus.java  | 363 ++++++---------
 .../org/apache/hadoop/hbase/RegionLoad.java     | 279 +++++++++---
 .../org/apache/hadoop/hbase/RegionMetrics.java  | 147 ++++++
 .../hadoop/hbase/RegionMetricsBuilder.java      | 437 ++++++++++++++++++
 .../org/apache/hadoop/hbase/ServerLoad.java     | 448 +++++++++++++------
 .../org/apache/hadoop/hbase/ServerMetrics.java  |  90 ++++
 .../hadoop/hbase/ServerMetricsBuilder.java      | 352 +++++++++++++++
 .../main/java/org/apache/hadoop/hbase/Size.java | 158 +++++++
 .../org/apache/hadoop/hbase/client/Admin.java   |   5 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |   4 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java    |   4 +-
 .../hbase/client/ClusterStatusListener.java     |   2 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   7 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |   7 +-
 .../hbase/replication/ReplicationLoadSink.java  |   8 +-
 .../replication/ReplicationLoadSource.java      |  16 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java     | 256 ++---------
 .../hbase/shaded/protobuf/RequestConverter.java |   5 +-
 .../chaos/actions/MoveRegionsOfTableAction.java |   3 +-
 .../mapreduce/IntegrationTestBulkLoad.java      |   2 +-
 .../test/IntegrationTestBigLinkedList.java      |   2 +-
 .../rest/StorageClusterStatusResource.java      |  12 +-
 .../rest/StorageClusterVersionResource.java     |   8 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |  12 +-
 .../tmpl/master/BackupMasterStatusTmpl.jamon    |   4 +-
 .../hbase/master/ClusterStatusPublisher.java    |  19 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  45 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   7 +-
 .../hbase/master/RegionPlacementMaintainer.java |   4 +-
 .../hadoop/hbase/master/ServerManager.java      |   3 +-
 .../hbase/regionserver/HRegionServer.java       |   9 +-
 .../hadoop/hbase/regionserver/HStore.java       |   2 +-
 .../MetricsRegionServerWrapperImpl.java         |   2 +-
 .../apache/hadoop/hbase/regionserver/Store.java |   4 +-
 .../org/apache/hadoop/hbase/tool/Canary.java    |   4 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   5 +-
 .../hadoop/hbase/util/HBaseFsckRepair.java      |   5 +-
 .../apache/hadoop/hbase/util/RegionMover.java   |   3 +-
 .../hadoop/hbase/util/RegionSplitter.java       |  11 +-
 .../hadoop/hbase/HBaseTestingUtility.java       |   2 +-
 .../hadoop/hbase/TestClientClusterMetrics.java  | 233 ++++++++++
 .../hadoop/hbase/TestClientClusterStatus.java   | 258 +++++++++++
 .../org/apache/hadoop/hbase/TestRegionLoad.java |  24 +-
 .../apache/hadoop/hbase/TestRegionMetrics.java  | 130 ++++++
 .../org/apache/hadoop/hbase/TestServerLoad.java |  65 +--
 .../apache/hadoop/hbase/TestServerMetrics.java  | 114 +++++
 .../java/org/apache/hadoop/hbase/TestSize.java  |  84 ++++
 .../apache/hadoop/hbase/client/TestAdmin2.java  |  11 +-
 .../hbase/client/TestAsyncClusterAdminApi.java  |   8 +-
 .../client/TestAsyncDecommissionAdminApi.java   |   3 +-
 .../hbase/client/TestClientClusterStatus.java   | 242 ----------
 .../hadoop/hbase/client/TestFromClientSide.java |   2 +-
 .../hbase/client/TestMetaWithReplicas.java      |   2 +-
 .../hbase/client/TestServerLoadDurability.java  | 117 +++++
 .../hbase/master/TestAssignmentListener.java    |   5 +-
 .../hbase/master/TestMasterNoCluster.java       |   6 +-
 .../TestMasterOperationsForRegionReplicas.java  |   4 +-
 .../master/assignment/MockMasterServices.java   |   7 +-
 .../TestFavoredStochasticBalancerPickers.java   |  11 +-
 .../TestFavoredStochasticLoadBalancer.java      |   4 +-
 .../TestRegionServerReadRequestMetrics.java     |  25 +-
 .../replication/TestMasterReplication.java      |   5 +-
 .../replication/TestReplicationStatus.java      |   3 +-
 .../hadoop/hbase/util/BaseTestHBaseFsck.java    |   4 +-
 .../util/TestMiniClusterLoadSequential.java     |   5 +-
 67 files changed, 3608 insertions(+), 1115 deletions(-)
----------------------------------------------------------------------
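
The fix works by copying data out of the protobuf messages eagerly instead of keeping a reference to them, because the rpc layer may reuse the ByteBuffer backing those messages. A minimal sketch of that defensive-copy pattern, using only pb accessors that appear in the RegionLoad diff below; the class name and the choice of fields are illustrative, not part of the commit:

import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;

// Illustrative only: copy the interesting fields out of the protobuf at construction
// time so that later reuse of the rpc buffer backing the message cannot corrupt them.
final class CopiedRegionLoad {
  private final byte[] regionName;        // toByteArray() returns a fresh copy
  private final int storeCount;
  private final long readRequestCount;

  CopiedRegionLoad(ClusterStatusProtos.RegionLoad pb) {
    this.regionName = pb.getRegionSpecifier().getValue().toByteArray();
    this.storeCount = pb.getStores();
    this.readRequestCount = pb.getReadRequestsCount();
    // No reference to pb is retained beyond this constructor.
  }

  byte[] getRegionName() {
    return regionName;
  }

  int getStoreCount() {
    return storeCount;
  }

  long getReadRequestCount() {
    return readRequestCount;
  }
}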


http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
new file mode 100644
index 0000000..103c107
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
@@ -0,0 +1,198 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Metrics information on the HBase cluster.
+ * <p>
+ * <tt>ClusterMetrics</tt> provides clients with information such as:
+ * <ul>
+ * <li>The count and names of region servers in the cluster.</li>
+ * <li>The count and names of dead region servers in the cluster.</li>
+ * <li>The name of the active master for the cluster.</li>
+ * <li>The name(s) of the backup master(s) for the cluster, if they exist.</li>
+ * <li>The average cluster load.</li>
+ * <li>The number of regions deployed on the cluster.</li>
+ * <li>The number of requests since last report.</li>
+ * <li>Detailed region server loading and resource usage information,
+ *  per server and per region.</li>
+ * <li>Regions in transition at master</li>
+ * <li>The unique cluster ID</li>
+ * </ul>
+ * <tt>{@link Option}</tt> provides a way to get the desired ClusterStatus information.
+ * The following code retrieves all of the cluster information.
+ * <pre>
+ * {@code
+ * // Original version still works
+ * Admin admin = connection.getAdmin();
+ * ClusterMetrics metrics = admin.getClusterStatus();
+ * // or below, a new version which has the same effects
+ * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
+ * }
+ * </pre>
+ * If only the information about live servers is wanted,
+ * then use code like the following:
+ * <pre>
+ * {@code
+ * Admin admin = connection.getAdmin();
+ * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
+ * }
+ * </pre>
+ */
+@InterfaceAudience.Public
+public interface ClusterMetrics {
+
+  /**
+   * @return the HBase version string as reported by the HMaster
+   */
+  @Nullable
+  String getHBaseVersion();
+
+  /**
+   * @return the names of region servers on the dead list
+   */
+  List<ServerName> getDeadServerNames();
+
+  /**
+   * @return the names of region servers on the live list
+   */
+  Map<ServerName, ServerMetrics> getLiveServerMetrics();
+
+  /**
+   * @return the number of regions deployed on the cluster
+   */
+  default int getRegionCount() {
+    return getLiveServerMetrics().entrySet().stream()
+        .mapToInt(v -> v.getValue().getRegionMetrics().size()).sum();
+  }
+
+  /**
+   * @return the number of requests since last report
+   */
+  default long getRequestCount() {
+    return getLiveServerMetrics().entrySet().stream()
+        .flatMap(v -> v.getValue().getRegionMetrics().values().stream())
+        .mapToLong(RegionMetrics::getRequestCount).sum();
+  }
+
+  /**
+   * Returns detailed information about the current master {@link ServerName}.
+   * @return current master information if it exists
+   */
+  @Nullable
+  ServerName getMasterName();
+
+  /**
+   * @return the names of backup masters
+   */
+  List<ServerName> getBackupMasterNames();
+
+  @InterfaceAudience.Private
+  List<RegionState> getRegionStatesInTransition();
+
+  @Nullable
+  String getClusterId();
+
+  List<String> getMasterCoprocessorNames();
+
+  default long getLastMajorCompactionTimestamp(TableName table) {
+    return getLiveServerMetrics().values().stream()
+        .flatMap(s -> s.getRegionMetrics().values().stream())
+        .filter(r -> RegionInfo.getTable(r.getRegionName()).equals(table))
+        .mapToLong(RegionMetrics::getLastMajorCompactionTimestamp).min().orElse(0);
+  }
+
+  default long getLastMajorCompactionTimestamp(byte[] regionName) {
+    return getLiveServerMetrics().values().stream()
+        .filter(s -> s.getRegionMetrics().containsKey(regionName))
+        .findAny()
+        .map(s -> s.getRegionMetrics().get(regionName).getLastMajorCompactionTimestamp())
+        .orElse(0L);
+  }
+
+  @Nullable
+  Boolean getBalancerOn();
+
+  int getMasterInfoPort();
+
+  /**
+   * @return the average cluster load
+   */
+  default double getAverageLoad() {
+    int serverSize = getLiveServerMetrics().size();
+    if (serverSize == 0) {
+      return 0;
+    }
+    return (double)getRegionCount() / (double)serverSize;
+  }
+
+  /**
+   * Kinds of ClusterMetrics
+   */
+  enum Option {
+    /**
+     * metrics about hbase version
+     */
+    HBASE_VERSION,
+    /**
+     * metrics about cluster id
+     */
+    CLUSTER_ID,
+    /**
+     * metrics about whether the balancer is on or not
+     */
+    BALANCER_ON,
+    /**
+     * metrics about live region servers
+     */
+    LIVE_SERVERS,
+    /**
+     * metrics about dead region servers
+     */
+    DEAD_SERVERS,
+    /**
+     * metrics about master name
+     */
+    MASTER,
+    /**
+     * metrics about backup master names
+     */
+    BACKUP_MASTERS,
+    /**
+     * metrics about master coprocessors
+     */
+    MASTER_COPROCESSORS,
+    /**
+     * metrics about regions in transition
+     */
+    REGIONS_IN_TRANSITION,
+    /**
+     * metrics about the master info port
+     */
+    MASTER_INFO_PORT
+  }
+}
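
A short usage sketch for the interface above, assuming an already opened Connection; it follows the class javadoc in passing an EnumSet of Option values to Admin#getClusterStatus, and the printed fields are chosen only for illustration:

import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class ClusterMetricsExample {
  public static void printLiveServers(Connection connection) throws IOException {
    try (Admin admin = connection.getAdmin()) {
      // Ask only for the live-server portion of the cluster status.
      ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
      for (Map.Entry<ServerName, ServerMetrics> entry
          : metrics.getLiveServerMetrics().entrySet()) {
        System.out.println(entry.getKey().getServerName() + " hosts "
            + entry.getValue().getRegionMetrics().size() + " regions");
      }
      // The default methods derive aggregate numbers from the per-server metrics.
      System.out.println("regions=" + metrics.getRegionCount()
          + ", average load=" + metrics.getAverageLoad());
    }
  }
}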

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
new file mode 100644
index 0000000..ed669a5
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
@@ -0,0 +1,397 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+
+@InterfaceAudience.Private
+public final class ClusterMetricsBuilder {
+
+  public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) {
+    ClusterStatusProtos.ClusterStatus.Builder builder
+        = ClusterStatusProtos.ClusterStatus.newBuilder()
+        .addAllBackupMasters(metrics.getBackupMasterNames().stream()
+            .map(ProtobufUtil::toServerName).collect(Collectors.toList()))
+        .addAllDeadServers(metrics.getDeadServerNames().stream()
+            .map(ProtobufUtil::toServerName).collect(Collectors.toList()))
+        .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream()
+            .map(s -> ClusterStatusProtos.LiveServerInfo
+                .newBuilder()
+                .setServer(ProtobufUtil.toServerName(s.getKey()))
+                .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue()))
+                .build())
+            .collect(Collectors.toList()))
+        .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream()
+            .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build())
+            .collect(Collectors.toList()))
+        .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream()
+            .map(r -> ClusterStatusProtos.RegionInTransition
+                .newBuilder()
+                .setSpec(HBaseProtos.RegionSpecifier
+                    .newBuilder()
+                    .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
+                    .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName()))
+                    .build())
+                .setRegionState(r.convert())
+                .build())
+            .collect(Collectors.toList()))
+        .setMasterInfoPort(metrics.getMasterInfoPort());
+    if (metrics.getMasterName() != null) {
+      builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
+    }
+    if (metrics.getBalancerOn() != null) {
+      builder.setBalancerOn(metrics.getBalancerOn());
+    }
+    if (metrics.getClusterId() != null) {
+      builder.setClusterId(new ClusterId(metrics.getClusterId()).convert());
+    }
+    if (metrics.getHBaseVersion() != null) {
+      builder.setHbaseVersion(
+          FSProtos.HBaseVersionFileContent.newBuilder()
+              .setVersion(metrics.getHBaseVersion()));
+    }
+    return builder.build();
+  }
+
+  public static ClusterMetrics toClusterMetrics(
+      ClusterStatusProtos.ClusterStatus proto) {
+    ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
+    builder.setLiveServerMetrics(proto.getLiveServersList().stream()
+        .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()),
+            ServerMetricsBuilder::toServerMetrics)))
+        .setDeadServerNames(proto.getDeadServersList().stream()
+            .map(ProtobufUtil::toServerName)
+            .collect(Collectors.toList()))
+        .setBackerMasterNames(proto.getBackupMastersList().stream()
+            .map(ProtobufUtil::toServerName)
+            .collect(Collectors.toList()))
+        .setRegionsInTransition(proto.getRegionsInTransitionList().stream()
+            .map(ClusterStatusProtos.RegionInTransition::getRegionState)
+            .map(RegionState::convert)
+            .collect(Collectors.toList()))
+        .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream()
+            .map(HBaseProtos.Coprocessor::getName)
+            .collect(Collectors.toList()));
+    if (proto.hasClusterId()) {
+      builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString());
+    }
+
+    if (proto.hasHbaseVersion()) {
+      builder.setHBaseVersion(proto.getHbaseVersion().getVersion());
+    }
+
+    if (proto.hasMaster()) {
+      builder.setMasterName(ProtobufUtil.toServerName(proto.getMaster()));
+    }
+
+    if (proto.hasBalancerOn()) {
+      builder.setBalancerOn(proto.getBalancerOn());
+    }
+
+    if (proto.hasMasterInfoPort()) {
+      builder.setMasterInfoPort(proto.getMasterInfoPort());
+    }
+    return builder.build();
+  }
+
+  /**
+   * Convert ClusterStatusProtos.Option to ClusterMetrics.Option
+   * @param option a ClusterStatusProtos.Option
+   * @return converted ClusterMetrics.Option
+   */
+  public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) {
+    switch (option) {
+      case HBASE_VERSION: return ClusterMetrics.Option.HBASE_VERSION;
+      case LIVE_SERVERS: return ClusterMetrics.Option.LIVE_SERVERS;
+      case DEAD_SERVERS: return ClusterMetrics.Option.DEAD_SERVERS;
+      case REGIONS_IN_TRANSITION: return ClusterMetrics.Option.REGIONS_IN_TRANSITION;
+      case CLUSTER_ID: return ClusterMetrics.Option.CLUSTER_ID;
+      case MASTER_COPROCESSORS: return ClusterMetrics.Option.MASTER_COPROCESSORS;
+      case MASTER: return ClusterMetrics.Option.MASTER;
+      case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS;
+      case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON;
+      case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT;
+      // should not reach here
+      default: throw new IllegalArgumentException("Invalid option: " + option);
+    }
+  }
+
+  /**
+   * Convert ClusterMetrics.Option to ClusterStatusProtos.Option
+   * @param option a ClusterMetrics.Option
+   * @return converted ClusterStatusProtos.Option
+   */
+  public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) {
+    switch (option) {
+      case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION;
+      case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS;
+      case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS;
+      case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION;
+      case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID;
+      case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS;
+      case MASTER: return ClusterStatusProtos.Option.MASTER;
+      case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS;
+      case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
+      case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT;
+      // should not reach here
+      default: throw new IllegalArgumentException("Invalid option: " + option);
+    }
+  }
+
+  /**
+   * Convert a list of ClusterStatusProtos.Option to an enum set of ClusterMetrics.Option
+   * @param options the pb options
+   * @return an enum set of ClusterMetrics.Option
+   */
+  public static EnumSet<ClusterMetrics.Option> toOptions(List<ClusterStatusProtos.Option> options) {
+    return options.stream().map(ClusterMetricsBuilder::toOption)
+        .collect(Collectors.toCollection(() -> EnumSet.noneOf(ClusterMetrics.Option.class)));
+  }
+
+  /**
+   * Convert an enum set of ClusterMetrics.Option to a list of ClusterStatusProtos.Option
+   * @param options the ClusterMetrics options
+   * @return a list of ClusterStatusProtos.Option
+   */
+  public static List<ClusterStatusProtos.Option> toOptions(EnumSet<ClusterMetrics.Option> options) {
+    return options.stream().map(ClusterMetricsBuilder::toOption).collect(Collectors.toList());
+  }
+
+  public static ClusterMetricsBuilder newBuilder() {
+    return new ClusterMetricsBuilder();
+  }
+  @Nullable
+  private String hbaseVersion;
+  private List<ServerName> deadServerNames = Collections.emptyList();
+  private Map<ServerName, ServerMetrics> liveServerMetrics = new TreeMap<>();
+  @Nullable
+  private ServerName masterName;
+  private List<ServerName> backupMasterNames = Collections.emptyList();
+  private List<RegionState> regionsInTransition = Collections.emptyList();
+  @Nullable
+  private String clusterId;
+  private List<String> masterCoprocessorNames = Collections.emptyList();
+  @Nullable
+  private Boolean balancerOn;
+  private int masterInfoPort;
+
+  private ClusterMetricsBuilder() {
+  }
+  public ClusterMetricsBuilder setHBaseVersion(String value) {
+    this.hbaseVersion = value;
+    return this;
+  }
+  public ClusterMetricsBuilder setDeadServerNames(List<ServerName> value) {
+    this.deadServerNames = value;
+    return this;
+  }
+
+  public ClusterMetricsBuilder setLiveServerMetrics(Map<ServerName, ServerMetrics> value) {
+    liveServerMetrics.putAll(value);
+    return this;
+  }
+
+  public ClusterMetricsBuilder setMasterName(ServerName value) {
+    this.masterName = value;
+    return this;
+  }
+  public ClusterMetricsBuilder setBackerMasterNames(List<ServerName> value) {
+    this.backupMasterNames = value;
+    return this;
+  }
+  public ClusterMetricsBuilder setRegionsInTransition(List<RegionState> value) {
+    this.regionsInTransition = value;
+    return this;
+  }
+  public ClusterMetricsBuilder setClusterId(String value) {
+    this.clusterId = value;
+    return this;
+  }
+  public ClusterMetricsBuilder setMasterCoprocessorNames(List<String> value) {
+    this.masterCoprocessorNames = value;
+    return this;
+  }
+  public ClusterMetricsBuilder setBalancerOn(@Nullable Boolean value) {
+    this.balancerOn = value;
+    return this;
+  }
+  public ClusterMetricsBuilder setMasterInfoPort(int value) {
+    this.masterInfoPort = value;
+    return this;
+  }
+  public ClusterMetrics build() {
+    return new ClusterMetricsImpl(
+        hbaseVersion,
+        deadServerNames,
+        liveServerMetrics,
+        masterName,
+        backupMasterNames,
+        regionsInTransition,
+        clusterId,
+        masterCoprocessorNames,
+        balancerOn,
+        masterInfoPort);
+  }
+  private static class ClusterMetricsImpl implements ClusterMetrics {
+    @Nullable
+    private final String hbaseVersion;
+    private final List<ServerName> deadServerNames;
+    private final Map<ServerName, ServerMetrics> liveServerMetrics;
+    @Nullable
+    private final ServerName masterName;
+    private final List<ServerName> backupMasterNames;
+    private final List<RegionState> regionsInTransition;
+    @Nullable
+    private final String clusterId;
+    private final List<String> masterCoprocessorNames;
+    @Nullable
+    private final Boolean balancerOn;
+    private final int masterInfoPort;
+
+    ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames,
+        Map<ServerName, ServerMetrics> liveServerMetrics,
+        ServerName masterName,
+        List<ServerName> backupMasterNames,
+        List<RegionState> regionsInTransition,
+        String clusterId,
+        List<String> masterCoprocessorNames,
+        Boolean balancerOn,
+        int masterInfoPort) {
+      this.hbaseVersion = hbaseVersion;
+      this.deadServerNames = Preconditions.checkNotNull(deadServerNames);
+      this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics);
+      this.masterName = masterName;
+      this.backupMasterNames = Preconditions.checkNotNull(backupMasterNames);
+      this.regionsInTransition = Preconditions.checkNotNull(regionsInTransition);
+      this.clusterId = clusterId;
+      this.masterCoprocessorNames = Preconditions.checkNotNull(masterCoprocessorNames);
+      this.balancerOn = balancerOn;
+      this.masterInfoPort = masterInfoPort;
+    }
+
+    @Override
+    public String getHBaseVersion() {
+      return hbaseVersion;
+    }
+
+    @Override
+    public List<ServerName> getDeadServerNames() {
+      return Collections.unmodifiableList(deadServerNames);
+    }
+
+    @Override
+    public Map<ServerName, ServerMetrics> getLiveServerMetrics() {
+      return Collections.unmodifiableMap(liveServerMetrics);
+    }
+
+    @Override
+    public ServerName getMasterName() {
+      return masterName;
+    }
+
+    @Override
+    public List<ServerName> getBackupMasterNames() {
+      return Collections.unmodifiableList(backupMasterNames);
+    }
+
+    @Override
+    public List<RegionState> getRegionStatesInTransition() {
+      return Collections.unmodifiableList(regionsInTransition);
+    }
+
+    @Override
+    public String getClusterId() {
+      return clusterId;
+    }
+
+    @Override
+    public List<String> getMasterCoprocessorNames() {
+      return Collections.unmodifiableList(masterCoprocessorNames);
+    }
+
+    @Override
+    public Boolean getBalancerOn() {
+      return balancerOn;
+    }
+
+    @Override
+    public int getMasterInfoPort() {
+      return masterInfoPort;
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder(1024);
+      sb.append("Master: " + getMasterName());
+
+      int backupMastersSize = getBackupMasterNames().size();
+      sb.append("\nNumber of backup masters: " + backupMastersSize);
+      if (backupMastersSize > 0) {
+        for (ServerName serverName: getBackupMasterNames()) {
+          sb.append("\n  " + serverName);
+        }
+      }
+
+      int serversSize = getLiveServerMetrics().size();
+      sb.append("\nNumber of live region servers: " + serversSize);
+      if (serversSize > 0) {
+        for (ServerName serverName : getLiveServerMetrics().keySet()) {
+          sb.append("\n  " + serverName.getServerName());
+        }
+      }
+
+      int deadServerSize = getDeadServerNames().size();
+      sb.append("\nNumber of dead region servers: " + deadServerSize);
+      if (deadServerSize > 0) {
+        for (ServerName serverName : getDeadServerNames()) {
+          sb.append("\n  " + serverName);
+        }
+      }
+
+      sb.append("\nAverage load: " + getAverageLoad());
+      sb.append("\nNumber of requests: " + getRequestCount());
+      sb.append("\nNumber of regions: " + getRegionCount());
+
+      int ritSize = getRegionStatesInTransition().size();
+      sb.append("\nNumber of regions in transition: " + ritSize);
+      if (ritSize > 0) {
+        for (RegionState state : getRegionStatesInTransition()) {
+          sb.append("\n  " + state.toDescriptiveString());
+        }
+      }
+      return sb.toString();
+    }
+  }
+}
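
A small sketch of how the builder above might be exercised, for example from test code (it is InterfaceAudience.Private, so it is not meant for end users); the sample values are invented, and ServerName.valueOf(host, port, startcode) is assumed to be available:

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;

public class ClusterMetricsBuilderSketch {
  public static ClusterMetrics sample() {
    // Fields that are not set keep their defaults (empty lists, empty map, null for
    // the nullable members), so a partially populated ClusterMetrics is still valid.
    return ClusterMetricsBuilder.newBuilder()
        .setHBaseVersion("2.0.0")                                    // sample value
        .setClusterId("sample-cluster-id")                           // sample value
        .setMasterName(ServerName.valueOf("master.example.com", 16000, 1L))
        .setMasterInfoPort(16010)
        .setBalancerOn(true)
        .build();
  }
}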

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 6b4c03a..8cbf302 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -19,16 +19,17 @@
 
 package org.apache.hadoop.hbase;
 
+import edu.umd.cs.findbugs.annotations.Nullable;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.master.RegionState;
 
 /**
  * Status information on the HBase cluster.
@@ -47,7 +48,7 @@ import org.apache.hadoop.hbase.master.RegionState;
  * <li>Regions in transition at master</li>
  * <li>The unique cluster ID</li>
  * </ul>
- * <tt>{@link Option}</tt> provides a way to get desired ClusterStatus information.
+ * <tt>{@link ClusterMetrics.Option}</tt> provides a way to get the desired ClusterStatus information.
  * The following codes will get all the cluster information.
  * <pre>
  * {@code
@@ -66,28 +67,20 @@ import org.apache.hadoop.hbase.master.RegionState;
  * ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
  * }
  * </pre>
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+ *             Use {@link ClusterMetrics} instead.
  */
 @InterfaceAudience.Public
-public class ClusterStatus {
+@Deprecated
+public class ClusterStatus implements ClusterMetrics {
 
   // TODO: remove this in 3.0
   private static final byte VERSION = 2;
 
-  private String hbaseVersion;
-  private Map<ServerName, ServerLoad> liveServers;
-  private List<ServerName> deadServers;
-  private ServerName master;
-  private List<ServerName> backupMasters;
-  private List<RegionState> intransition;
-  private String clusterId;
-  private String[] masterCoprocessors;
-  private Boolean balancerOn;
-  private int masterInfoPort;
+  private final ClusterMetrics metrics;
 
   /**
-   * Use {@link ClusterStatus.Builder} to construct a ClusterStatus instead.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-15511">HBASE-15511</a>).
    */
   @Deprecated
   public ClusterStatus(final String hbaseVersion, final String clusterid,
@@ -100,61 +93,52 @@ public class ClusterStatus {
       final Boolean balancerOn,
       final int masterInfoPort) {
     // TODO: make this constructor private
-    this.hbaseVersion = hbaseVersion;
-    this.liveServers = servers;
-    this.deadServers = new ArrayList<>(deadServers);
-    this.master = master;
-    this.backupMasters = new ArrayList<>(backupMasters);
-    this.intransition = rit;
-    this.clusterId = clusterid;
-    this.masterCoprocessors = masterCoprocessors;
-    this.balancerOn = balancerOn;
-    this.masterInfoPort = masterInfoPort;
+    this(ClusterMetricsBuilder.newBuilder().setHBaseVersion(hbaseVersion)
+      .setDeadServerNames(new ArrayList<>(deadServers))
+      .setLiveServerMetrics(servers.entrySet().stream()
+      .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue())))
+      .setBackerMasterNames(new ArrayList<>(backupMasters)).setBalancerOn(balancerOn)
+      .setClusterId(clusterid)
+      .setMasterCoprocessorNames(Arrays.asList(masterCoprocessors))
+      .setMasterName(master)
+      .setMasterInfoPort(masterInfoPort)
+      .setRegionsInTransition(rit)
+      .build());
   }
 
-  private ClusterStatus(final String hbaseVersion, final String clusterid,
-      final Map<ServerName, ServerLoad> servers,
-      final List<ServerName> deadServers,
-      final ServerName master,
-      final List<ServerName> backupMasters,
-      final List<RegionState> rit,
-      final String[] masterCoprocessors,
-      final Boolean balancerOn,
-      final int masterInfoPort) {
-    this.hbaseVersion = hbaseVersion;
-    this.liveServers = servers;
-    this.deadServers = deadServers;
-    this.master = master;
-    this.backupMasters = backupMasters;
-    this.intransition = rit;
-    this.clusterId = clusterid;
-    this.masterCoprocessors = masterCoprocessors;
-    this.balancerOn = balancerOn;
-    this.masterInfoPort = masterInfoPort;
+  @InterfaceAudience.Private
+  public ClusterStatus(ClusterMetrics metrics) {
+    this.metrics = metrics;
   }
 
   /**
    * @return the names of region servers on the dead list
    */
+  @Override
   public List<ServerName> getDeadServerNames() {
-    if (deadServers == null) {
-      return Collections.emptyList();
-    }
-    return Collections.unmodifiableList(deadServers);
+    return metrics.getDeadServerNames();
+  }
+
+  @Override
+  public Map<ServerName, ServerMetrics> getLiveServerMetrics() {
+    return metrics.getLiveServerMetrics();
   }
 
   /**
-   * @return the number of region servers in the cluster
-   */
+   * @return the number of region servers in the cluster
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getLiveServerMetrics()}.
+   */
+  @Deprecated
   public int getServersSize() {
-    return liveServers != null ? liveServers.size() : 0;
+    return metrics.getLiveServerMetrics().size();
   }
 
   /**
    * @return the number of dead region servers in the cluster
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    *             (<a href="https://issues.apache.org/jira/browse/HBASE-13656">HBASE-13656</a>).
-   *             Use {@link #getDeadServersSize()}.
+   *             Use {@link #getDeadServerNames()}.
    */
   @Deprecated
   public int getDeadServers() {
@@ -163,32 +147,22 @@ public class ClusterStatus {
 
   /**
    * @return the number of dead region servers in the cluster
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getDeadServerNames()}.
    */
+  @Deprecated
   public int getDeadServersSize() {
-    return deadServers != null ? deadServers.size() : 0;
-  }
-
-
-  /**
-   * @return the average cluster load
-   */
-  public double getAverageLoad() {
-    int load = getRegionsCount();
-    int serverSize = getServersSize();
-    return serverSize != 0 ? (double)load / (double)serverSize : 0.0;
+    return metrics.getDeadServerNames().size();
   }
 
   /**
    * @return the number of regions deployed on the cluster
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRegionCount()}.
    */
+  @Deprecated
   public int getRegionsCount() {
-    int count = 0;
-    if (liveServers != null && !liveServers.isEmpty()) {
-      for (Map.Entry<ServerName, ServerLoad> e: this.liveServers.entrySet()) {
-        count = count + e.getValue().getNumberOfRegions();
-      }
-    }
-    return count;
+    return getRegionCount();
   }
 
   /**
@@ -201,20 +175,32 @@ public class ClusterStatus {
     return (int) getRequestCount();
   }
 
-  public long getRequestCount() {
-    if (liveServers == null) {
-      return 0L;
-    }
-    return liveServers.values().stream()
-            .mapToLong(ServerLoad::getNumberOfRequests)
-            .sum();
+  @Nullable
+  @Override
+  public ServerName getMasterName() {
+    return metrics.getMasterName();
+  }
+
+  @Override
+  public List<ServerName> getBackupMasterNames() {
+    return metrics.getBackupMasterNames();
+  }
+
+  @Override
+  public List<RegionState> getRegionStatesInTransition() {
+    return metrics.getRegionStatesInTransition();
   }
 
   /**
    * @return the HBase version string as reported by the HMaster
    */
   public String getHBaseVersion() {
-    return hbaseVersion;
+    return metrics.getHBaseVersion();
+  }
+
+  private Map<ServerName, ServerLoad> getLiveServerLoads() {
+    return metrics.getLiveServerMetrics().entrySet().stream()
+      .collect(Collectors.toMap(e -> e.getKey(), e -> new ServerLoad(e.getValue())));
   }
 
   @Override
@@ -227,7 +213,7 @@ public class ClusterStatus {
     }
     ClusterStatus other = (ClusterStatus) o;
     return Objects.equal(getHBaseVersion(), other.getHBaseVersion()) &&
-      Objects.equal(this.liveServers, other.liveServers) &&
+      Objects.equal(getLiveServerLoads(), other.getLiveServerLoads()) &&
       getDeadServerNames().containsAll(other.getDeadServerNames()) &&
       Arrays.equals(getMasterCoprocessors(), other.getMasterCoprocessors()) &&
       Objects.equal(getMaster(), other.getMaster()) &&
@@ -238,8 +224,7 @@ public class ClusterStatus {
 
   @Override
   public int hashCode() {
-    return Objects.hashCode(hbaseVersion, liveServers, deadServers, master, backupMasters,
-      clusterId, masterInfoPort);
+    return metrics.hashCode();
   }
 
   /**
@@ -251,111 +236,123 @@ public class ClusterStatus {
     return VERSION;
   }
 
-  //
-  // Getters
-  //
-
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getLiveServerMetrics()} instead.
+   */
+  @Deprecated
   public Collection<ServerName> getServers() {
-    if (liveServers == null) {
-      return Collections.emptyList();
-    }
-    return Collections.unmodifiableCollection(this.liveServers.keySet());
+    return metrics.getLiveServerMetrics().keySet();
   }
 
   /**
    * Returns detailed information about the current master {@link ServerName}.
    * @return current master information if it exists
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getMasterName} instead.
    */
+  @Deprecated
   public ServerName getMaster() {
-    return this.master;
+    return metrics.getMasterName();
   }
 
   /**
    * @return the number of backup masters in the cluster
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getBackupMasterNames} instead.
    */
+  @Deprecated
   public int getBackupMastersSize() {
-    return backupMasters != null ? backupMasters.size() : 0;
+    return metrics.getBackupMasterNames().size();
   }
 
   /**
    * @return the names of backup masters
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getBackupMasterNames} instead.
    */
+  @Deprecated
   public List<ServerName> getBackupMasters() {
-    if (backupMasters == null) {
-      return Collections.emptyList();
-    }
-    return Collections.unmodifiableList(this.backupMasters);
+    return metrics.getBackupMasterNames();
   }
 
   /**
    * @param sn
    * @return Server's load or null if not found.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getLiveServerMetrics} instead.
    */
+  @Deprecated
   public ServerLoad getLoad(final ServerName sn) {
-    return liveServers != null ? liveServers.get(sn) : null;
+    ServerMetrics serverMetrics = metrics.getLiveServerMetrics().get(sn);
+    return serverMetrics == null ? null : new ServerLoad(serverMetrics);
   }
 
-  @InterfaceAudience.Private
-  public List<RegionState> getRegionsInTransition() {
-    if (intransition == null) {
-      return Collections.emptyList();
-    }
-    return Collections.unmodifiableList(intransition);
+  public String getClusterId() {
+    return metrics.getClusterId();
   }
 
-  public String getClusterId() {
-    return clusterId;
+  @Override
+  public List<String> getMasterCoprocessorNames() {
+    return metrics.getMasterCoprocessorNames();
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getMasterCoprocessorNames} instead.
+   */
+  @Deprecated
   public String[] getMasterCoprocessors() {
-    return masterCoprocessors;
+    List<String> rval = metrics.getMasterCoprocessorNames();
+    return rval.toArray(new String[rval.size()]);
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getLastMajorCompactionTimestamp(TableName)} instead.
+   */
+  @Deprecated
   public long getLastMajorCompactionTsForTable(TableName table) {
-    long result = Long.MAX_VALUE;
-    for (ServerName server : getServers()) {
-      ServerLoad load = getLoad(server);
-      for (RegionLoad rl : load.getRegionsLoad().values()) {
-        if (table.equals(HRegionInfo.getTable(rl.getName()))) {
-          result = Math.min(result, rl.getLastMajorCompactionTs());
-        }
-      }
-    }
-    return result == Long.MAX_VALUE ? 0 : result;
+    return metrics.getLastMajorCompactionTimestamp(table);
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getLastMajorCompactionTimestamp(byte[])} instead.
+   */
+  @Deprecated
   public long getLastMajorCompactionTsForRegion(final byte[] region) {
-    for (ServerName server : getServers()) {
-      ServerLoad load = getLoad(server);
-      RegionLoad rl = load.getRegionsLoad().get(region);
-      if (rl != null) {
-        return rl.getLastMajorCompactionTs();
-      }
-    }
-    return 0;
+    return metrics.getLastMajorCompactionTimestamp(region);
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             No flag in 2.0
+   */
+  @Deprecated
   public boolean isBalancerOn() {
-    return balancerOn != null && balancerOn;
+    return metrics.getBalancerOn() != null && metrics.getBalancerOn();
   }
 
+  @Override
   public Boolean getBalancerOn() {
-    return balancerOn;
+    return metrics.getBalancerOn();
   }
 
+  @Override
   public int getMasterInfoPort() {
-    return masterInfoPort;
+    return metrics.getMasterInfoPort();
   }
 
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(1024);
-    sb.append("Master: " + master);
+    sb.append("Master: " + metrics.getMasterName());
 
     int backupMastersSize = getBackupMastersSize();
     sb.append("\nNumber of backup masters: " + backupMastersSize);
     if (backupMastersSize > 0) {
-      for (ServerName serverName: backupMasters) {
+      for (ServerName serverName: metrics.getBackupMasterNames()) {
         sb.append("\n  " + serverName);
       }
     }
@@ -363,15 +360,15 @@ public class ClusterStatus {
     int serversSize = getServersSize();
     sb.append("\nNumber of live region servers: " + serversSize);
     if (serversSize > 0) {
-      for (ServerName serverName: liveServers.keySet()) {
+      for (ServerName serverName : metrics.getLiveServerMetrics().keySet()) {
         sb.append("\n  " + serverName.getServerName());
       }
     }
 
-    int deadServerSize = getDeadServersSize();
+    int deadServerSize = metrics.getDeadServerNames().size();
     sb.append("\nNumber of dead region servers: " + deadServerSize);
     if (deadServerSize > 0) {
-      for (ServerName serverName: deadServers) {
+      for (ServerName serverName : metrics.getDeadServerNames()) {
         sb.append("\n  " + serverName);
       }
     }
@@ -380,109 +377,13 @@ public class ClusterStatus {
     sb.append("\nNumber of requests: " + getRequestCount());
     sb.append("\nNumber of regions: " + getRegionsCount());
 
-    int ritSize = (intransition != null) ? intransition.size() : 0;
+    int ritSize = metrics.getRegionStatesInTransition().size();
     sb.append("\nNumber of regions in transition: " + ritSize);
     if (ritSize > 0) {
-      for (RegionState state: intransition) {
+      for (RegionState state: metrics.getRegionStatesInTransition()) {
         sb.append("\n  " + state.toDescriptiveString());
       }
     }
     return sb.toString();
   }
-
-  @InterfaceAudience.Private
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder for construct a ClusterStatus.
-   */
-  @InterfaceAudience.Private
-  public static class Builder {
-    private String hbaseVersion = null;
-    private Map<ServerName, ServerLoad> liveServers = null;
-    private List<ServerName> deadServers = null;
-    private ServerName master = null;
-    private List<ServerName> backupMasters = null;
-    private List<RegionState> intransition = null;
-    private String clusterId = null;
-    private String[] masterCoprocessors = null;
-    private Boolean balancerOn = null;
-    private int masterInfoPort = -1;
-
-    private Builder() {}
-
-    public Builder setHBaseVersion(String hbaseVersion) {
-      this.hbaseVersion = hbaseVersion;
-      return this;
-    }
-
-    public Builder setLiveServers(Map<ServerName, ServerLoad> liveServers) {
-      this.liveServers = liveServers;
-      return this;
-    }
-
-    public Builder setDeadServers(List<ServerName> deadServers) {
-      this.deadServers = deadServers;
-      return this;
-    }
-
-    public Builder setMaster(ServerName master) {
-      this.master = master;
-      return this;
-    }
-
-    public Builder setBackupMasters(List<ServerName> backupMasters) {
-      this.backupMasters = backupMasters;
-      return this;
-    }
-
-    public Builder setRegionState(List<RegionState> intransition) {
-      this.intransition = intransition;
-      return this;
-    }
-
-    public Builder setClusterId(String clusterId) {
-      this.clusterId = clusterId;
-      return this;
-    }
-
-    public Builder setMasterCoprocessors(String[] masterCoprocessors) {
-      this.masterCoprocessors = masterCoprocessors;
-      return this;
-    }
-
-    public Builder setBalancerOn(Boolean balancerOn) {
-      this.balancerOn = balancerOn;
-      return this;
-    }
-
-    public Builder setMasterInfoPort(int masterInfoPort) {
-      this.masterInfoPort = masterInfoPort;
-      return this;
-    }
-
-    public ClusterStatus build() {
-      return new ClusterStatus(hbaseVersion, clusterId, liveServers,
-          deadServers, master, backupMasters, intransition, masterCoprocessors,
-          balancerOn, masterInfoPort);
-    }
-  }
-
-  /**
-   * Kinds of ClusterStatus
-   */
-  public enum Option {
-    HBASE_VERSION, /** status about hbase version */
-    CLUSTER_ID, /** status about cluster id */
-    BALANCER_ON, /** status about balancer is on or not */
-    LIVE_SERVERS, /** status about live region servers */
-    DEAD_SERVERS, /** status about dead region servers */
-    MASTER, /** status about master */
-    BACKUP_MASTERS, /** status about backup masters */
-    MASTER_COPROCESSORS, /** status about master coprocessors */
-    REGIONS_IN_TRANSITION, /** status about regions in transition */
-    MASTER_INFO_PORT /** master info port **/
-  }
 }
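
To illustrate the deprecation path introduced above, a short sketch comparing the old ClusterStatus accessors with the ClusterMetrics replacements named in the deprecation notes; the assertions are illustrative only:

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterStatus;

public class ClusterStatusMigrationSketch {
  @SuppressWarnings("deprecation")
  public static void compare(ClusterStatus status) {
    // The deprecated accessors keep working because ClusterStatus now delegates to a
    // wrapped ClusterMetrics instance.
    int liveViaOldApi = status.getServersSize();
    int deadViaOldApi = status.getDeadServersSize();

    // The replacements return the same information.
    ClusterMetrics metrics = status;   // ClusterStatus implements ClusterMetrics
    int liveViaNewApi = metrics.getLiveServerMetrics().size();
    int deadViaNewApi = metrics.getDeadServerNames().size();

    assert liveViaOldApi == liveViaNewApi;
    assert deadViaOldApi == deadViaNewApi;
  }
}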

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index a2b993f..811f83c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -21,184 +21,341 @@
 package org.apache.hadoop.hbase;
 
 import java.util.List;
-
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.util.Strings;
 import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Strings;
 
 /**
-  * Encapsulates per-region load metrics.
-  */
+ * Encapsulates per-region load metrics.
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+ *             Use {@link RegionMetrics} instead.
+ */
 @InterfaceAudience.Public
-public class RegionLoad {
-
+@Deprecated
+public class RegionLoad implements RegionMetrics {
+  // DON'T use this pb object since the backing byte array may be modified in the rpc layer.
+  // We keep this pb object for backwards compatibility.
   protected ClusterStatusProtos.RegionLoad regionLoadPB;
+  private final RegionMetrics metrics;
 
-  @InterfaceAudience.Private
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD")
   public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) {
     this.regionLoadPB = regionLoadPB;
+    this.metrics = RegionMetricsBuilder.toRegionMetrics(regionLoadPB);
+  }
+
+  RegionLoad(RegionMetrics metrics) {
+    this.metrics = metrics;
+    this.regionLoadPB = RegionMetricsBuilder.toRegionLoad(metrics);
   }
 
   /**
    * @return the region name
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRegionName} instead.
    */
+  @Deprecated
   public byte[] getName() {
-    return regionLoadPB.getRegionSpecifier().getValue().toByteArray();
+    return metrics.getRegionName();
   }
 
-  /**
-   * @return the region name as a string
-   */
-  public String getNameAsString() {
-    return Bytes.toStringBinary(getName());
+  @Override
+  public byte[] getRegionName() {
+    return metrics.getRegionName();
+  }
+
+  @Override
+  public int getStoreCount() {
+    return metrics.getStoreCount();
+  }
+
+  @Override
+  public int getStoreFileCount() {
+    return metrics.getStoreFileCount();
+  }
+
+  @Override
+  public Size getStoreFileSize() {
+    return metrics.getStoreFileSize();
+  }
+
+  @Override
+  public Size getMemStoreSize() {
+    return metrics.getMemStoreSize();
+  }
+
+  @Override
+  public long getReadRequestCount() {
+    return metrics.getReadRequestCount();
+  }
+
+  @Override
+  public long getFilteredReadRequestCount() {
+    return metrics.getFilteredReadRequestCount();
+  }
+
+  @Override
+  public Size getStoreFileIndexSize() {
+    return metrics.getStoreFileIndexSize();
+  }
+
+  @Override
+  public long getWriteRequestCount() {
+    return metrics.getWriteRequestCount();
+  }
+
+  @Override
+  public Size getStoreFileRootLevelIndexSize() {
+    return metrics.getStoreFileRootLevelIndexSize();
+  }
+
+  @Override
+  public Size getStoreFileUncompressedDataIndexSize() {
+    return metrics.getStoreFileUncompressedDataIndexSize();
+  }
+
+  @Override
+  public Size getBloomFilterSize() {
+    return metrics.getBloomFilterSize();
+  }
+
+  @Override
+  public long getCompactingCellCount() {
+    return metrics.getCompactingCellCount();
+  }
+
+  @Override
+  public long getCompactedCellCount() {
+    return metrics.getCompactedCellCount();
+  }
+
+  @Override
+  public long getCompletedSequenceId() {
+    return metrics.getCompletedSequenceId();
+  }
+
+  @Override
+  public Map<byte[], Long> getStoreSequenceId() {
+    return metrics.getStoreSequenceId();
+  }
+
+  @Override
+  public Size getUncompressedStoreFileSize() {
+    return metrics.getUncompressedStoreFileSize();
   }
 
   /**
    * @return the number of stores
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getStoreCount} instead.
    */
+  @Deprecated
   public int getStores() {
-    return regionLoadPB.getStores();
+    return metrics.getStoreCount();
   }
 
   /**
    * @return the number of storefiles
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getStoreFileCount} instead.
    */
+  @Deprecated
   public int getStorefiles() {
-    return regionLoadPB.getStorefiles();
+    return metrics.getStoreFileCount();
   }
 
   /**
    * @return the total size of the storefiles, in MB
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getStoreFileSize} instead.
    */
+  @Deprecated
   public int getStorefileSizeMB() {
-    return regionLoadPB.getStorefileSizeMB();
+    return (int) metrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
   }
 
   /**
    * @return the memstore size, in MB
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getMemStoreSize} instead.
    */
+  @Deprecated
   public int getMemStoreSizeMB() {
-    return regionLoadPB.getMemStoreSizeMB();
+    return (int) metrics.getMemStoreSize().get(Size.Unit.MEGABYTE);
   }
 
   /**
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    *             ((<a href="https://issues.apache.org/jira/browse/HBASE-3935">HBASE-3935</a>)).
-   *             Use {@link #getStorefileIndexSizeKB()} instead.
+   *             Use {@link #getStoreFileRootLevelIndexSize} instead.
    */
   @Deprecated
   public int getStorefileIndexSizeMB() {
     // Return value divided by 1024
-    return (int) (regionLoadPB.getStorefileIndexSizeKB() >> 10);
+    return (getRootIndexSizeKB() >> 10);
   }
 
-  public long getStorefileIndexSizeKB() {
-    return regionLoadPB.getStorefileIndexSizeKB();
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getStoreFileRootLevelIndexSize()} instead.
+   */
+  @Deprecated
+  public int getStorefileIndexSizeKB() {
+    return getRootIndexSizeKB();
   }
 
   /**
    * @return the number of requests made to region
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRequestCount()} instead.
    */
+  @Deprecated
   public long getRequestsCount() {
-    return getReadRequestsCount() + getWriteRequestsCount();
+    return metrics.getRequestCount();
   }
 
   /**
    * @return the number of read requests made to region
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getReadRequestCount} instead.
    */
+  @Deprecated
   public long getReadRequestsCount() {
-    return regionLoadPB.getReadRequestsCount();
+    return metrics.getReadRequestCount();
   }
 
   /**
    * @return the number of filtered read requests made to region
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getFilteredReadRequestCount} instead.
    */
+  @Deprecated
   public long getFilteredReadRequestsCount() {
-    return regionLoadPB.getFilteredReadRequestsCount();
+    return metrics.getFilteredReadRequestCount();
   }
 
   /**
    * @return the number of write requests made to region
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getWriteRequestCount} instead.
    */
+  @Deprecated
   public long getWriteRequestsCount() {
-    return regionLoadPB.getWriteRequestsCount();
+    return metrics.getWriteRequestCount();
   }
 
   /**
    * @return The current total size of root-level indexes for the region, in KB.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getStoreFileRootLevelIndexSize} instead.
    */
+  @Deprecated
   public int getRootIndexSizeKB() {
-    return regionLoadPB.getRootIndexSizeKB();
+    return (int) metrics.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
   }
 
   /**
    * @return The total size of all index blocks, not just the root level, in KB.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getStoreFileUncompressedDataIndexSize} instead.
    */
+  @Deprecated
   public int getTotalStaticIndexSizeKB() {
-    return regionLoadPB.getTotalStaticIndexSizeKB();
+    return (int) metrics.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
   }
 
   /**
    * @return The total size of all Bloom filter blocks, not just loaded into the
    * block cache, in KB.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getBloomFilterSize} instead.
    */
+  @Deprecated
   public int getTotalStaticBloomSizeKB() {
-    return regionLoadPB.getTotalStaticBloomSizeKB();
+    return (int) metrics.getBloomFilterSize().get(Size.Unit.KILOBYTE);
   }
 
   /**
    * @return the total number of kvs in current compaction
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getCompactingCellCount} instead.
    */
+  @Deprecated
   public long getTotalCompactingKVs() {
-    return regionLoadPB.getTotalCompactingKVs();
+    return metrics.getCompactingCellCount();
   }
 
   /**
    * @return the number of already compacted kvs in current compaction
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getCompactedCellCount} instead.
    */
+  @Deprecated
   public long getCurrentCompactedKVs() {
-    return regionLoadPB.getCurrentCompactedKVs();
+    return metrics.getCompactedCellCount();
   }
 
   /**
    * This does not really belong inside RegionLoad but it's being done in the name of expediency.
    * @return the completed sequence Id for the region
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getCompletedSequenceId} instead.
    */
+  @Deprecated
   public long getCompleteSequenceId() {
-    return regionLoadPB.getCompleteSequenceId();
+    return metrics.getCompletedSequenceId();
   }
 
   /**
    * @return completed sequence id per store.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getStoreSequenceId} instead.
    */
-  public List<StoreSequenceId> getStoreCompleteSequenceId() {
-    return regionLoadPB.getStoreCompleteSequenceIdList();
+  @Deprecated
+  public List<ClusterStatusProtos.StoreSequenceId> getStoreCompleteSequenceId() {
+    return metrics.getStoreSequenceId().entrySet().stream()
+        .map(s -> ClusterStatusProtos.StoreSequenceId.newBuilder()
+                  .setFamilyName(UnsafeByteOperations.unsafeWrap(s.getKey()))
+                  .setSequenceId(s.getValue())
+                  .build())
+        .collect(Collectors.toList());
   }
 
   /**
    * @return the uncompressed size of the storefiles in MB.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getUncompressedStoreFileSize} instead.
    */
+  @Deprecated
   public int getStoreUncompressedSizeMB() {
-    return regionLoadPB.getStoreUncompressedSizeMB();
+    return (int) metrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
   }
 
   /**
    * @return the data locality of region in the regionserver.
    */
+  @Override
   public float getDataLocality() {
-    if (regionLoadPB.hasDataLocality()) {
-      return regionLoadPB.getDataLocality();
-    }
-    return 0.0f;
+    return metrics.getDataLocality();
+  }
+
+  @Override
+  public long getLastMajorCompactionTimestamp() {
+    return metrics.getLastMajorCompactionTimestamp();
   }
 
   /**
    * @return the timestamp of the oldest hfile for any store of this region.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getLastMajorCompactionTimestamp} instead.
    */
+  @Deprecated
   public long getLastMajorCompactionTs() {
-    return regionLoadPB.getLastMajorCompactionTs();
+    return metrics.getLastMajorCompactionTimestamp();
   }
 
   /**
@@ -208,47 +365,43 @@ public class RegionLoad {
   public String toString() {
     StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "numberOfStores",
         this.getStores());
-    sb = Strings.appendKeyValue(sb, "numberOfStorefiles",
-        this.getStorefiles());
-    sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
-      this.getStoreUncompressedSizeMB());
-    sb = Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp",
-      this.getLastMajorCompactionTs());
-    sb = Strings.appendKeyValue(sb, "storefileSizeMB",
-        this.getStorefileSizeMB());
+    Strings.appendKeyValue(sb, "numberOfStorefiles", this.getStorefiles());
+    Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
+        this.getStoreUncompressedSizeMB());
+    Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp",
+        this.getLastMajorCompactionTs());
+    Strings.appendKeyValue(sb, "storefileSizeMB", this.getStorefileSizeMB());
     if (this.getStoreUncompressedSizeMB() != 0) {
-      sb = Strings.appendKeyValue(sb, "compressionRatio",
+      Strings.appendKeyValue(sb, "compressionRatio",
           String.format("%.4f", (float) this.getStorefileSizeMB() /
               (float) this.getStoreUncompressedSizeMB()));
     }
-    sb = Strings.appendKeyValue(sb, "memstoreSizeMB",
+    Strings.appendKeyValue(sb, "memstoreSizeMB",
         this.getMemStoreSizeMB());
-    sb = Strings.appendKeyValue(sb, "storefileIndexSizeKB",
-        this.getStorefileIndexSizeKB());
-    sb = Strings.appendKeyValue(sb, "readRequestsCount",
+    Strings.appendKeyValue(sb, "readRequestsCount",
         this.getReadRequestsCount());
-    sb = Strings.appendKeyValue(sb, "writeRequestsCount",
+    Strings.appendKeyValue(sb, "writeRequestsCount",
         this.getWriteRequestsCount());
-    sb = Strings.appendKeyValue(sb, "rootIndexSizeKB",
+    Strings.appendKeyValue(sb, "rootIndexSizeKB",
         this.getRootIndexSizeKB());
-    sb = Strings.appendKeyValue(sb, "totalStaticIndexSizeKB",
+    Strings.appendKeyValue(sb, "totalStaticIndexSizeKB",
         this.getTotalStaticIndexSizeKB());
-    sb = Strings.appendKeyValue(sb, "totalStaticBloomSizeKB",
+    Strings.appendKeyValue(sb, "totalStaticBloomSizeKB",
         this.getTotalStaticBloomSizeKB());
-    sb = Strings.appendKeyValue(sb, "totalCompactingKVs",
+    Strings.appendKeyValue(sb, "totalCompactingKVs",
         this.getTotalCompactingKVs());
-    sb = Strings.appendKeyValue(sb, "currentCompactedKVs",
+    Strings.appendKeyValue(sb, "currentCompactedKVs",
         this.getCurrentCompactedKVs());
     float compactionProgressPct = Float.NaN;
     if (this.getTotalCompactingKVs() > 0) {
       compactionProgressPct = ((float) this.getCurrentCompactedKVs() /
           (float) this.getTotalCompactingKVs());
     }
-    sb = Strings.appendKeyValue(sb, "compactionProgressPct",
+    Strings.appendKeyValue(sb, "compactionProgressPct",
         compactionProgressPct);
-    sb = Strings.appendKeyValue(sb, "completeSequenceId",
+    Strings.appendKeyValue(sb, "completeSequenceId",
         this.getCompleteSequenceId());
-    sb = Strings.appendKeyValue(sb, "dataLocality",
+    Strings.appendKeyValue(sb, "dataLocality",
         this.getDataLocality());
     return sb.toString();
   }
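
A note for callers migrating off the deprecated int-valued KB/MB getters above: the replacement pattern is to ask the new Size-typed metric for whichever unit is wanted at the call site. A minimal sketch against the RegionMetrics and Size APIs added in this patch (the helper class name is made up for illustration; Size#get(Size.Unit) returning a double is assumed from how this diff uses it):

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.Size;

public final class RegionMetricsMigration {
  private RegionMetricsMigration() {}

  // Old API: RegionLoad#getRootIndexSizeKB() returned an int number of KB.
  // New API: the metric is a Size; pick the unit where the value is consumed.
  public static int rootIndexSizeKB(RegionMetrics metrics) {
    return (int) metrics.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
  }

  // Old API: RegionLoad#getMemStoreSizeMB(); new API: convert the Size to MB.
  public static int memStoreSizeMB(RegionMetrics metrics) {
    return (int) metrics.getMemStoreSize().get(Size.Unit.MEGABYTE);
  }

  // getRequestCount() is a default method on the interface: read + write requests.
  public static long totalRequests(RegionMetrics metrics) {
    return metrics.getRequestCount();
  }
}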

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
new file mode 100644
index 0000000..e73683f
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
@@ -0,0 +1,147 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.util.Map;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Encapsulates per-region load metrics.
+ */
+@InterfaceAudience.Public
+public interface RegionMetrics {
+
+  /**
+   * @return the region name
+   */
+  byte[] getRegionName();
+
+  /**
+   * @return the number of stores
+   */
+  int getStoreCount();
+
+  /**
+   * @return the number of storefiles
+   */
+  int getStoreFileCount();
+
+  /**
+   * @return the total size of the storefiles
+   */
+  Size getStoreFileSize();
+
+  /**
+   * @return the memstore size
+   */
+  Size getMemStoreSize();
+
+  /**
+   * @return the number of read requests made to region
+   */
+  long getReadRequestCount();
+
+  /**
+   * @return the number of write requests made to region
+   */
+  long getWriteRequestCount();
+
+  /**
+   * @return the number of write requests and read requests made to region
+   */
+  default long getRequestCount() {
+    return getReadRequestCount() + getWriteRequestCount();
+  }
+
+  /**
+   * @return the region name as a string
+   */
+  default String getNameAsString() {
+    return Bytes.toStringBinary(getRegionName());
+  }
+
+  /**
+   * @return the number of filtered read requests made to region
+   */
+  long getFilteredReadRequestCount();
+
+  /**
+   * TODO: why do we pass the same value to two different counters? Currently the value from
+   * getStoreFileIndexSize() is the same as that from getStoreFileRootLevelIndexSize();
+   * see HRegionServer#createRegionLoad.
+   * @return The current total size of root-level indexes for the region
+   */
+  Size getStoreFileIndexSize();
+
+  /**
+   * @return The current total size of root-level indexes for the region
+   */
+  Size getStoreFileRootLevelIndexSize();
+
+  /**
+   * @return The total size of all index blocks, not just the root level
+   */
+  Size getStoreFileUncompressedDataIndexSize();
+
+  /**
+   * @return The total size of all Bloom filter blocks, not just those loaded into the block cache
+   */
+  Size getBloomFilterSize();
+
+  /**
+   * @return the total number of cells in current compaction
+   */
+  long getCompactingCellCount();
+
+  /**
+   * @return the number of cells already compacted in the current compaction
+   */
+  long getCompactedCellCount();
+
+  /**
+   * This does not really belong inside RegionLoad but it's being done in the name of expediency.
+   * @return the completed sequence Id for the region
+   */
+  long getCompletedSequenceId();
+
+  /**
+   * @return completed sequence id per store.
+   */
+  Map<byte[], Long> getStoreSequenceId();
+
+
+  /**
+   * @return the uncompressed size of the storefiles
+   */
+  Size getUncompressedStoreFileSize();
+
+  /**
+   * @return the data locality of region in the regionserver.
+   */
+  float getDataLocality();
+
+  /**
+   * @return the timestamp of the oldest hfile for any store of this region.
+   */
+  long getLastMajorCompactionTimestamp();
+
+}
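
The interface above intentionally exposes sizes as Size objects and leaves derived ratios to the caller. Below is a small consumer sketch that uses only the methods declared above; the class name and the one-line output format are illustrative, not part of the patch:

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.Size;

public final class RegionMetricsSummary {
  private RegionMetricsSummary() {}

  // Fraction of the current compaction already done, or NaN when nothing is
  // compacting, mirroring the compactionProgressPct convention in RegionLoad#toString().
  public static float compactionProgress(RegionMetrics m) {
    long compacting = m.getCompactingCellCount();
    return compacting > 0 ? (float) m.getCompactedCellCount() / compacting : Float.NaN;
  }

  // Compressed-to-uncompressed store file ratio, guarded against division by zero.
  public static float compressionRatio(RegionMetrics m) {
    double uncompressedMB = m.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
    return uncompressedMB == 0
        ? Float.NaN
        : (float) (m.getStoreFileSize().get(Size.Unit.MEGABYTE) / uncompressedMB);
  }

  // One-line summary built only from methods declared on the interface.
  public static String oneLine(RegionMetrics m) {
    return m.getNameAsString()
        + " requests=" + m.getRequestCount()
        + " locality=" + m.getDataLocality()
        + " compactionProgress=" + compactionProgress(m);
  }
}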

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
new file mode 100644
index 0000000..0fb6d3d
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
@@ -0,0 +1,437 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+
+@InterfaceAudience.Private
+public final class RegionMetricsBuilder {
+
+  public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regionLoadPB) {
+    return RegionMetricsBuilder
+        .newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray())
+        .setBloomFilterSize(new Size(regionLoadPB.getTotalStaticBloomSizeKB(), Size.Unit.KILOBYTE))
+        .setCompactedCellCount(regionLoadPB.getCurrentCompactedKVs())
+        .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs())
+        .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId())
+        .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f)
+        .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount())
+        .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(),
+          Size.Unit.KILOBYTE))
+        .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs())
+        .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE))
+        .setReadRequestCount(regionLoadPB.getReadRequestsCount())
+        .setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
+        .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(),
+          Size.Unit.KILOBYTE))
+        .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(),
+          Size.Unit.KILOBYTE))
+        .setStoreCount(regionLoadPB.getStores())
+        .setStoreFileCount(regionLoadPB.getStorefiles())
+        .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE))
+        .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream()
+          .collect(Collectors.toMap(
+            (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(),
+              ClusterStatusProtos.StoreSequenceId::getSequenceId)))
+        .setUncompressedStoreFileSize(
+          new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE))
+        .build();
+  }
+
+  private static List<ClusterStatusProtos.StoreSequenceId> toStoreSequenceId(
+      Map<byte[], Long> ids) {
+    return ids.entrySet().stream()
+        .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder()
+          .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey()))
+          .setSequenceId(e.getValue())
+          .build())
+        .collect(Collectors.toList());
+  }
+
+  public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
+    return ClusterStatusProtos.RegionLoad.newBuilder()
+        .setRegionSpecifier(HBaseProtos.RegionSpecifier
+          .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
+          .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName()))
+          .build())
+        .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize()
+          .get(Size.Unit.KILOBYTE))
+        .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount())
+        .setTotalCompactingKVs(regionMetrics.getCompactingCellCount())
+        .setCompleteSequenceId(regionMetrics.getCompletedSequenceId())
+        .setDataLocality(regionMetrics.getDataLocality())
+        .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount())
+        .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize()
+          .get(Size.Unit.KILOBYTE))
+        .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
+        .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
+        .setReadRequestsCount(regionMetrics.getReadRequestCount())
+        .setWriteRequestsCount(regionMetrics.getWriteRequestCount())
+        .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize()
+          .get(Size.Unit.KILOBYTE))
+        .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize()
+          .get(Size.Unit.KILOBYTE))
+        .setStores(regionMetrics.getStoreCount())
+        .setStorefiles(regionMetrics.getStoreFileCount())
+        .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE))
+        .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId()))
+        .setStoreUncompressedSizeMB(
+          (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
+        .build();
+  }
+
+  public static RegionMetricsBuilder newBuilder(byte[] name) {
+    return new RegionMetricsBuilder(name);
+  }
+
+  private final byte[] name;
+  private int storeCount;
+  private int storeFileCount;
+  private long compactingCellCount;
+  private long compactedCellCount;
+  private Size storeFileSize = Size.ZERO;
+  private Size memStoreSize = Size.ZERO;
+  private Size indexSize = Size.ZERO;
+  private Size rootLevelIndexSize = Size.ZERO;
+  private Size uncompressedDataIndexSize = Size.ZERO;
+  private Size bloomFilterSize = Size.ZERO;
+  private Size uncompressedStoreFileSize = Size.ZERO;
+  private long writeRequestCount;
+  private long readRequestCount;
+  private long filteredReadRequestCount;
+  private long completedSequenceId;
+  private Map<byte[], Long> storeSequenceIds = Collections.emptyMap();
+  private float dataLocality;
+  private long lastMajorCompactionTimestamp;
+  private RegionMetricsBuilder(byte[] name) {
+    this.name = name;
+  }
+
+  public RegionMetricsBuilder setStoreCount(int value) {
+    this.storeCount = value;
+    return this;
+  }
+  public RegionMetricsBuilder setStoreFileCount(int value) {
+    this.storeFileCount = value;
+    return this;
+  }
+  public RegionMetricsBuilder setCompactingCellCount(long value) {
+    this.compactingCellCount = value;
+    return this;
+  }
+  public RegionMetricsBuilder setCompactedCellCount(long value) {
+    this.compactedCellCount = value;
+    return this;
+  }
+  public RegionMetricsBuilder setStoreFileSize(Size value) {
+    this.storeFileSize = value;
+    return this;
+  }
+  public RegionMetricsBuilder setMemStoreSize(Size value) {
+    this.memStoreSize = value;
+    return this;
+  }
+  public RegionMetricsBuilder setStoreFileIndexSize(Size value) {
+    this.indexSize = value;
+    return this;
+  }
+  public RegionMetricsBuilder setStoreFileRootLevelIndexSize(Size value) {
+    this.rootLevelIndexSize = value;
+    return this;
+  }
+  public RegionMetricsBuilder setStoreFileUncompressedDataIndexSize(Size value) {
+    this.uncompressedDataIndexSize = value;
+    return this;
+  }
+  public RegionMetricsBuilder setBloomFilterSize(Size value) {
+    this.bloomFilterSize = value;
+    return this;
+  }
+  public RegionMetricsBuilder setUncompressedStoreFileSize(Size value) {
+    this.uncompressedStoreFileSize = value;
+    return this;
+  }
+  public RegionMetricsBuilder setWriteRequestCount(long value) {
+    this.writeRequestCount = value;
+    return this;
+  }
+  public RegionMetricsBuilder setReadRequestCount(long value) {
+    this.readRequestCount = value;
+    return this;
+  }
+  public RegionMetricsBuilder setFilteredReadRequestCount(long value) {
+    this.filteredReadRequestCount = value;
+    return this;
+  }
+  public RegionMetricsBuilder setCompletedSequenceId(long value) {
+    this.completedSequenceId = value;
+    return this;
+  }
+  public RegionMetricsBuilder setStoreSequenceIds(Map<byte[], Long> value) {
+    this.storeSequenceIds = value;
+    return this;
+  }
+  public RegionMetricsBuilder setDataLocality(float value) {
+    this.dataLocality = value;
+    return this;
+  }
+  public RegionMetricsBuilder setLastMajorCompactionTimestamp(long value) {
+    this.lastMajorCompactionTimestamp = value;
+    return this;
+  }
+
+  public RegionMetrics build() {
+    return new RegionMetricsImpl(name,
+        storeCount,
+        storeFileCount,
+        compactingCellCount,
+        compactedCellCount,
+        storeFileSize,
+        memStoreSize,
+        indexSize,
+        rootLevelIndexSize,
+        uncompressedDataIndexSize,
+        bloomFilterSize,
+        uncompressedStoreFileSize,
+        writeRequestCount,
+        readRequestCount,
+        filteredReadRequestCount,
+        completedSequenceId,
+        storeSequenceIds,
+        dataLocality,
+        lastMajorCompactionTimestamp);
+  }
+
+  private static class RegionMetricsImpl implements RegionMetrics {
+    private final byte[] name;
+    private final int storeCount;
+    private final int storeFileCount;
+    private final long compactingCellCount;
+    private final long compactedCellCount;
+    private final Size storeFileSize;
+    private final Size memStoreSize;
+    private final Size indexSize;
+    private final Size rootLevelIndexSize;
+    private final Size uncompressedDataIndexSize;
+    private final Size bloomFilterSize;
+    private final Size uncompressedStoreFileSize;
+    private final long writeRequestCount;
+    private final long readRequestCount;
+    private final long filteredReadRequestCount;
+    private final long completedSequenceId;
+    private final Map<byte[], Long> storeSequenceIds;
+    private final float dataLocality;
+    private final long lastMajorCompactionTimestamp;
+    RegionMetricsImpl(byte[] name,
+        int storeCount,
+        int storeFileCount,
+        final long compactingCellCount,
+        long compactedCellCount,
+        Size storeFileSize,
+        Size memStoreSize,
+        Size indexSize,
+        Size rootLevelIndexSize,
+        Size uncompressedDataIndexSize,
+        Size bloomFilterSize,
+        Size uncompressedStoreFileSize,
+        long writeRequestCount,
+        long readRequestCount,
+        long filteredReadRequestCount,
+        long completedSequenceId,
+        Map<byte[], Long> storeSequenceIds,
+        float dataLocality,
+        long lastMajorCompactionTimestamp) {
+      this.name = Preconditions.checkNotNull(name);
+      this.storeCount = storeCount;
+      this.storeFileCount = storeFileCount;
+      this.compactingCellCount = compactingCellCount;
+      this.compactedCellCount = compactedCellCount;
+      this.storeFileSize = Preconditions.checkNotNull(storeFileSize);
+      this.memStoreSize = Preconditions.checkNotNull(memStoreSize);
+      this.indexSize = Preconditions.checkNotNull(indexSize);
+      this.rootLevelIndexSize = Preconditions.checkNotNull(rootLevelIndexSize);
+      this.uncompressedDataIndexSize = Preconditions.checkNotNull(uncompressedDataIndexSize);
+      this.bloomFilterSize = Preconditions.checkNotNull(bloomFilterSize);
+      this.uncompressedStoreFileSize = Preconditions.checkNotNull(uncompressedStoreFileSize);
+      this.writeRequestCount = writeRequestCount;
+      this.readRequestCount = readRequestCount;
+      this.filteredReadRequestCount = filteredReadRequestCount;
+      this.completedSequenceId = completedSequenceId;
+      this.storeSequenceIds = Preconditions.checkNotNull(storeSequenceIds);
+      this.dataLocality = dataLocality;
+      this.lastMajorCompactionTimestamp = lastMajorCompactionTimestamp;
+    }
+
+    @Override
+    public byte[] getRegionName() {
+      return name;
+    }
+
+    @Override
+    public int getStoreCount() {
+      return storeCount;
+    }
+
+    @Override
+    public int getStoreFileCount() {
+      return storeFileCount;
+    }
+
+    @Override
+    public Size getStoreFileSize() {
+      return storeFileSize;
+    }
+
+    @Override
+    public Size getMemStoreSize() {
+      return memStoreSize;
+    }
+
+    @Override
+    public long getReadRequestCount() {
+      return readRequestCount;
+    }
+
+    @Override
+    public long getFilteredReadRequestCount() {
+      return filteredReadRequestCount;
+    }
+
+    @Override
+    public long getWriteRequestCount() {
+      return writeRequestCount;
+    }
+
+    @Override
+    public Size getStoreFileIndexSize() {
+      return indexSize;
+    }
+
+    @Override
+    public Size getStoreFileRootLevelIndexSize() {
+      return rootLevelIndexSize;
+    }
+
+    @Override
+    public Size getStoreFileUncompressedDataIndexSize() {
+      return uncompressedDataIndexSize;
+    }
+
+    @Override
+    public Size getBloomFilterSize() {
+      return bloomFilterSize;
+    }
+
+    @Override
+    public long getCompactingCellCount() {
+      return compactingCellCount;
+    }
+
+    @Override
+    public long getCompactedCellCount() {
+      return compactedCellCount;
+    }
+
+    @Override
+    public long getCompletedSequenceId() {
+      return completedSequenceId;
+    }
+
+    @Override
+    public Map<byte[], Long> getStoreSequenceId() {
+      return Collections.unmodifiableMap(storeSequenceIds);
+    }
+
+    @Override
+    public Size getUncompressedStoreFileSize() {
+      return uncompressedStoreFileSize;
+    }
+
+    @Override
+    public float getDataLocality() {
+      return dataLocality;
+    }
+
+    @Override
+    public long getLastMajorCompactionTimestamp() {
+      return lastMajorCompactionTimestamp;
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount",
+          this.getStoreCount());
+      Strings.appendKeyValue(sb, "storeFileCount",
+          this.getStoreFileCount());
+      Strings.appendKeyValue(sb, "uncompressedStoreFileSize",
+          this.getUncompressedStoreFileSize());
+      Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp",
+          this.getLastMajorCompactionTimestamp());
+      Strings.appendKeyValue(sb, "storeFileSize",
+          this.getStoreFileSize());
+      if (this.getUncompressedStoreFileSize().get() != 0) {
+        Strings.appendKeyValue(sb, "compressionRatio",
+            String.format("%.4f",
+                (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) /
+                (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)));
+      }
+      Strings.appendKeyValue(sb, "memStoreSize",
+          this.getMemStoreSize());
+      Strings.appendKeyValue(sb, "readRequestCount",
+          this.getReadRequestCount());
+      Strings.appendKeyValue(sb, "writeRequestCount",
+          this.getWriteRequestCount());
+      Strings.appendKeyValue(sb, "rootLevelIndexSize",
+          this.getStoreFileRootLevelIndexSize());
+      Strings.appendKeyValue(sb, "uncompressedDataIndexSize",
+          this.getStoreFileUncompressedDataIndexSize());
+      Strings.appendKeyValue(sb, "bloomFilterSize",
+          this.getBloomFilterSize());
+      Strings.appendKeyValue(sb, "compactingCellCount",
+          this.getCompactingCellCount());
+      Strings.appendKeyValue(sb, "compactedCellCount",
+          this.getCompactedCellCount());
+      float compactionProgressPct = Float.NaN;
+      if (this.getCompactingCellCount() > 0) {
+        compactionProgressPct = ((float) this.getCompactedCellCount() /
+            (float) this.getCompactingCellCount());
+      }
+      Strings.appendKeyValue(sb, "compactionProgressPct",
+          compactionProgressPct);
+      Strings.appendKeyValue(sb, "completedSequenceId",
+          this.getCompletedSequenceId());
+      Strings.appendKeyValue(sb, "dataLocality",
+          this.getDataLocality());
+      return sb.toString();
+    }
+  }
+
+}
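
To see the builder and the two protobuf converters above working together, here is a quick round-trip sketch. The region name, sizes, and counts are made-up sample values; fields not set on the builder fall back to the zero / Size.ZERO / empty-map defaults shown above:

import java.util.Collections;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.util.Bytes;

public final class RegionMetricsRoundTrip {
  private RegionMetricsRoundTrip() {}

  public static void main(String[] args) {
    // Build an immutable RegionMetrics by hand; unset fields keep the builder defaults.
    RegionMetrics metrics = RegionMetricsBuilder
        .newBuilder(Bytes.toBytes("t1,,1.0123456789abcdef."))
        .setStoreCount(2)
        .setStoreFileCount(5)
        .setStoreFileSize(new Size(128, Size.Unit.MEGABYTE))
        .setUncompressedStoreFileSize(new Size(256, Size.Unit.MEGABYTE))
        .setReadRequestCount(1000)
        .setWriteRequestCount(250)
        .setDataLocality(0.9f)
        .setStoreSequenceIds(Collections.singletonMap(Bytes.toBytes("cf"), 42L))
        .build();

    // Convert to the wire form and back. The converters are inverses up to
    // unit rounding: MB/KB fields are truncated to ints on the protobuf side.
    ClusterStatusProtos.RegionLoad pb = RegionMetricsBuilder.toRegionLoad(metrics);
    RegionMetrics copy = RegionMetricsBuilder.toRegionMetrics(pb);

    System.out.println(metrics.getRequestCount() == copy.getRequestCount());   // true
    System.out.println(copy.getStoreFileSize().get(Size.Unit.MEGABYTE));       // 128.0
  }
}

Expect small lossiness across the round trip: because the proto stores sizes as int MB/KB fields, any sub-unit precision in the Size values is dropped.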