Posted to commits@hbase.apache.org by op...@apache.org on 2019/10/08 01:32:41 UTC

[hbase] branch branch-2.2 updated: HBASE-22903 : Table to RegionStatesCount metrics - Use for broken alter_status command (#611)

This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
     new 4a37572  HBASE-22903 : Table to RegionStatesCount metrics - Use for broken alter_status command (#611)
4a37572 is described below

commit 4a37572251a46f4cc3af529189add548405b1eb4
Author: Viraj Jasani <vi...@gmail.com>
AuthorDate: Mon Sep 30 15:05:22 2019 +0530

    HBASE-22903 : Table to RegionStatesCount metrics - Use for broken alter_status command (#611)
    
    Signed-off-by: huzheng <op...@gmail.com>
---
 .../org/apache/hadoop/hbase/ClusterMetrics.java    |  15 +-
 .../apache/hadoop/hbase/ClusterMetricsBuilder.java |  42 +++++-
 .../org/apache/hadoop/hbase/ClusterStatus.java     |   7 +
 .../hadoop/hbase/client/RegionStatesCount.java     | 167 +++++++++++++++++++++
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  48 ++++++
 .../src/main/protobuf/ClusterStatus.proto          |  15 ++
 .../org/apache/hadoop/hbase/master/HMaster.java    |  19 +++
 .../hbase/master/assignment/AssignmentManager.java |  38 +++++
 .../hadoop/hbase/TestClientClusterMetrics.java     |  55 +++++++
 hbase-shell/src/main/ruby/hbase/admin.rb           |  15 +-
 hbase-shell/src/test/ruby/hbase/admin_test.rb      |   7 +
 11 files changed, 418 insertions(+), 10 deletions(-)
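
Before the full diff, a minimal sketch (not part of the commit) of how a client could read the new per-table region-state counts that this change exposes. It assumes an HBase 2.2 client built with this patch and a reachable cluster configured via hbase-site.xml on the classpath; the table name "my_table" and the wrapper class are placeholders, and RegionStatesCount is @InterfaceAudience.Private, so it is not a stable public contract.

import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionStatesCount;

public class TableRegionStatesCountExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Request only the new per-table region-state counts from the active master.
      ClusterMetrics metrics =
          admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT));
      Map<TableName, RegionStatesCount> counts = metrics.getTableRegionStatesCount();
      // "my_table" is a placeholder; substitute an existing table name.
      RegionStatesCount c = counts.get(TableName.valueOf("my_table"));
      if (c != null) {
        System.out.println("open=" + c.getOpenRegions()
            + " closed=" + c.getClosedRegions()
            + " split=" + c.getSplitRegions()
            + " rit=" + c.getRegionsInTransition()
            + " total=" + c.getTotalRegions());
      }
    }
  }
}

The shell-side change below relies on the same map: alter_status now derives its "N/M regions updated" output from getTotalRegions, getRegionsInTransition and getClosedRegions rather than from Admin#getAlterStatus.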

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
index acced3e..497ab93 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
@@ -23,6 +23,7 @@ import edu.umd.cs.findbugs.annotations.Nullable;
 import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -153,6 +154,14 @@ public interface ClusterMetrics {
   }
 
   /**
+   * Provide the region states count for a given table,
+   * e.g. how many regions of the given table are open/closed/in transition, etc.
+   *
+   * @return map of table to region states count
+   */
+  Map<TableName, RegionStatesCount> getTableRegionStatesCount();
+
+  /**
    * Kinds of ClusterMetrics
    */
   enum Option {
@@ -199,6 +208,10 @@ public interface ClusterMetrics {
     /**
      * metrics about live region servers name
      */
-    SERVERS_NAME
+    SERVERS_NAME,
+    /**
+     * metrics about the per-table count of regions in each state
+     */
+    TABLE_TO_REGIONS_COUNT,
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
index 30728ac..493fe71 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
@@ -26,6 +26,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
+
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -70,7 +72,14 @@ public final class ClusterMetricsBuilder {
             .collect(Collectors.toList()))
         .setMasterInfoPort(metrics.getMasterInfoPort())
         .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName)
-            .collect(Collectors.toList()));
+          .collect(Collectors.toList()))
+        .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
+          .map(status ->
+            ClusterStatusProtos.TableRegionStatesCount.newBuilder()
+              .setTableName(ProtobufUtil.toProtoTableName((status.getKey())))
+              .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue()))
+              .build())
+          .collect(Collectors.toList()));
     if (metrics.getMasterName() != null) {
       builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
     }
@@ -108,7 +117,12 @@ public final class ClusterMetricsBuilder {
             .map(HBaseProtos.Coprocessor::getName)
             .collect(Collectors.toList()))
         .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName)
-            .collect(Collectors.toList()));
+            .collect(Collectors.toList()))
+        .setTableRegionStatesCount(
+          proto.getTableRegionStatesCountList().stream()
+          .collect(Collectors.toMap(
+            e -> ProtobufUtil.toTableName(e.getTableName()),
+            e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount()))));
     if (proto.hasClusterId()) {
       builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString());
     }
@@ -149,6 +163,7 @@ public final class ClusterMetricsBuilder {
       case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON;
       case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME;
       case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT;
+      case TABLE_TO_REGIONS_COUNT: return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT;
       // should not reach here
       default: throw new IllegalArgumentException("Invalid option: " + option);
     }
@@ -172,6 +187,7 @@ public final class ClusterMetricsBuilder {
       case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
       case SERVERS_NAME: return Option.SERVERS_NAME;
       case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT;
+      case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT;
       // should not reach here
       default: throw new IllegalArgumentException("Invalid option: " + option);
     }
@@ -214,6 +230,7 @@ public final class ClusterMetricsBuilder {
   private Boolean balancerOn;
   private int masterInfoPort;
   private List<ServerName> serversName = Collections.emptyList();
+  private Map<TableName, RegionStatesCount> tableRegionStatesCount = Collections.emptyMap();
 
   private ClusterMetricsBuilder() {
   }
@@ -263,6 +280,13 @@ public final class ClusterMetricsBuilder {
     this.serversName = serversName;
     return this;
   }
+
+  public ClusterMetricsBuilder setTableRegionStatesCount(
+      Map<TableName, RegionStatesCount> tableRegionStatesCount) {
+    this.tableRegionStatesCount = tableRegionStatesCount;
+    return this;
+  }
+
   public ClusterMetrics build() {
     return new ClusterMetricsImpl(
         hbaseVersion,
@@ -275,7 +299,9 @@ public final class ClusterMetricsBuilder {
         masterCoprocessorNames,
         balancerOn,
         masterInfoPort,
-        serversName);
+        serversName,
+        tableRegionStatesCount
+    );
   }
   private static class ClusterMetricsImpl implements ClusterMetrics {
     @Nullable
@@ -293,6 +319,7 @@ public final class ClusterMetricsBuilder {
     private final Boolean balancerOn;
     private final int masterInfoPort;
     private final List<ServerName> serversName;
+    private final Map<TableName, RegionStatesCount> tableRegionStatesCount;
 
     ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames,
         Map<ServerName, ServerMetrics> liveServerMetrics,
@@ -303,7 +330,8 @@ public final class ClusterMetricsBuilder {
         List<String> masterCoprocessorNames,
         Boolean balancerOn,
         int masterInfoPort,
-        List<ServerName> serversName) {
+        List<ServerName> serversName,
+        Map<TableName, RegionStatesCount> tableRegionStatesCount) {
       this.hbaseVersion = hbaseVersion;
       this.deadServerNames = Preconditions.checkNotNull(deadServerNames);
       this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics);
@@ -315,6 +343,7 @@ public final class ClusterMetricsBuilder {
       this.balancerOn = balancerOn;
       this.masterInfoPort = masterInfoPort;
       this.serversName = serversName;
+      this.tableRegionStatesCount = Preconditions.checkNotNull(tableRegionStatesCount);
     }
 
     @Override
@@ -373,6 +402,11 @@ public final class ClusterMetricsBuilder {
     }
 
     @Override
+    public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
+      return Collections.unmodifiableMap(tableRegionStatesCount);
+    }
+
+    @Override
     public String toString() {
       StringBuilder sb = new StringBuilder(1024);
       sb.append("Master: " + getMasterName());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 33c30dd..6fdb588 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -26,6 +26,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
+
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -350,6 +352,11 @@ public class ClusterStatus implements ClusterMetrics {
   }
 
   @Override
+  public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
+    return metrics.getTableRegionStatesCount();
+  }
+
+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(1024);
     sb.append("Master: " + metrics.getMasterName());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java
new file mode 100644
index 0000000..1e1ce95
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java
@@ -0,0 +1,167 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public final class RegionStatesCount {
+
+  private int openRegions;
+  private int splitRegions;
+  private int closedRegions;
+  private int regionsInTransition;
+  private int totalRegions;
+
+  private RegionStatesCount() {
+  }
+
+  public int getClosedRegions() {
+    return closedRegions;
+  }
+
+  public int getOpenRegions() {
+    return openRegions;
+  }
+
+  public int getSplitRegions() {
+    return splitRegions;
+  }
+
+  public int getRegionsInTransition() {
+    return regionsInTransition;
+  }
+
+  public int getTotalRegions() {
+    return totalRegions;
+  }
+
+  private void setClosedRegions(int closedRegions) {
+    this.closedRegions = closedRegions;
+  }
+
+  private void setOpenRegions(int openRegions) {
+    this.openRegions = openRegions;
+  }
+
+  private void setSplitRegions(int splitRegions) {
+    this.splitRegions = splitRegions;
+  }
+
+  private void setRegionsInTransition(int regionsInTransition) {
+    this.regionsInTransition = regionsInTransition;
+  }
+
+  private void setTotalRegions(int totalRegions) {
+    this.totalRegions = totalRegions;
+  }
+
+  public static class RegionStatesCountBuilder {
+    private int openRegions;
+    private int splitRegions;
+    private int closedRegions;
+    private int regionsInTransition;
+    private int totalRegions;
+
+    public RegionStatesCountBuilder setOpenRegions(int openRegions) {
+      this.openRegions = openRegions;
+      return this;
+    }
+
+    public RegionStatesCountBuilder setSplitRegions(int splitRegions) {
+      this.splitRegions = splitRegions;
+      return this;
+    }
+
+    public RegionStatesCountBuilder setClosedRegions(int closedRegions) {
+      this.closedRegions = closedRegions;
+      return this;
+    }
+
+    public RegionStatesCountBuilder setRegionsInTransition(int regionsInTransition) {
+      this.regionsInTransition = regionsInTransition;
+      return this;
+    }
+
+    public RegionStatesCountBuilder setTotalRegions(int totalRegions) {
+      this.totalRegions = totalRegions;
+      return this;
+    }
+
+    public RegionStatesCount build() {
+      RegionStatesCount regionStatesCount=new RegionStatesCount();
+      regionStatesCount.setOpenRegions(openRegions);
+      regionStatesCount.setClosedRegions(closedRegions);
+      regionStatesCount.setRegionsInTransition(regionsInTransition);
+      regionStatesCount.setSplitRegions(splitRegions);
+      regionStatesCount.setTotalRegions(totalRegions);
+      return regionStatesCount;
+    }
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder("RegionStatesCount{");
+    sb.append("openRegions=").append(openRegions);
+    sb.append(", splitRegions=").append(splitRegions);
+    sb.append(", closedRegions=").append(closedRegions);
+    sb.append(", regionsInTransition=").append(regionsInTransition);
+    sb.append(", totalRegions=").append(totalRegions);
+    sb.append('}');
+    return sb.toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    RegionStatesCount that = (RegionStatesCount) o;
+
+    if (openRegions != that.openRegions) {
+      return false;
+    }
+    if (splitRegions != that.splitRegions) {
+      return false;
+    }
+    if (closedRegions != that.closedRegions) {
+      return false;
+    }
+    if (regionsInTransition != that.regionsInTransition) {
+      return false;
+    }
+    return totalRegions == that.totalRegions;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = openRegions;
+    result = 31 * result + splitRegions;
+    result = 31 * result + closedRegions;
+    result = 31 * result + regionsInTransition;
+    result = 31 * result + totalRegions;
+    return result;
+  }
+
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 9410b4b..753dd55 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLoadStats;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -3284,4 +3285,51 @@ public final class ProtobufUtil {
     }
     return Collections.emptySet();
   }
+
+  public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount(
+      RegionStatesCount regionStatesCount) {
+    int openRegions = 0;
+    int splitRegions = 0;
+    int closedRegions = 0;
+    int regionsInTransition = 0;
+    int totalRegions = 0;
+    if (regionStatesCount != null) {
+      openRegions = regionStatesCount.getOpenRegions();
+      splitRegions = regionStatesCount.getSplitRegions();
+      closedRegions = regionStatesCount.getClosedRegions();
+      regionsInTransition = regionStatesCount.getRegionsInTransition();
+      totalRegions = regionStatesCount.getTotalRegions();
+    }
+    return ClusterStatusProtos.RegionStatesCount.newBuilder()
+      .setOpenRegions(openRegions)
+      .setSplitRegions(splitRegions)
+      .setClosedRegions(closedRegions)
+      .setRegionsInTransition(regionsInTransition)
+      .setTotalRegions(totalRegions)
+      .build();
+  }
+
+  public static RegionStatesCount toTableRegionStatesCount(
+    ClusterStatusProtos.RegionStatesCount regionStatesCount) {
+    int openRegions = 0;
+    int splitRegions = 0;
+    int closedRegions = 0;
+    int regionsInTransition = 0;
+    int totalRegions = 0;
+    if (regionStatesCount != null) {
+      closedRegions = regionStatesCount.getClosedRegions();
+      regionsInTransition = regionStatesCount.getRegionsInTransition();
+      openRegions = regionStatesCount.getOpenRegions();
+      splitRegions = regionStatesCount.getSplitRegions();
+      totalRegions = regionStatesCount.getTotalRegions();
+    }
+    return new RegionStatesCount.RegionStatesCountBuilder()
+      .setOpenRegions(openRegions)
+      .setSplitRegions(splitRegions)
+      .setClosedRegions(closedRegions)
+      .setRegionsInTransition(regionsInTransition)
+      .setTotalRegions(totalRegions)
+      .build();
+  }
+
 }
diff --git a/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
index c3fe19d..82ca563 100644
--- a/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
@@ -218,6 +218,19 @@ message LiveServerInfo {
   required ServerLoad server_load = 2;
 }
 
+message RegionStatesCount {
+  required uint32 open_regions = 1;
+  required uint32 split_regions = 2;
+  required uint32 closed_regions = 3;
+  required uint32 regions_in_transition = 4;
+  required uint32 total_regions = 5;
+}
+
+message TableRegionStatesCount {
+  required TableName table_name = 1;
+  required RegionStatesCount region_states_count = 2;
+}
+
 message ClusterStatus {
   optional HBaseVersionFileContent hbase_version = 1;
   repeated LiveServerInfo live_servers = 2;
@@ -230,6 +243,7 @@ message ClusterStatus {
   optional bool balancer_on = 9;
   optional int32 master_info_port = 10 [default = -1];
   repeated ServerName servers_name = 11;
+  repeated TableRegionStatesCount table_region_states_count = 12;
 }
 
 enum Option {
@@ -244,4 +258,5 @@ enum Option {
   BALANCER_ON = 8;
   MASTER_INFO_PORT = 9;
   SERVERS_NAME = 10;
+  TABLE_TO_REGIONS_COUNT = 11;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 6d29684..8722b4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
@@ -2517,6 +2518,24 @@ public class HMaster extends HRegionServer implements MasterServices {
           }
           break;
         }
+        case TABLE_TO_REGIONS_COUNT: {
+          if (isActiveMaster() && isInitialized() && assignmentManager != null) {
+            try {
+              Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>();
+              Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll();
+              for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) {
+                TableName tableName = tableDescriptor.getTableName();
+                RegionStatesCount regionStatesCount = assignmentManager
+                  .getRegionStatesCount(tableName);
+                tableRegionStatesCountMap.put(tableName, regionStatesCount);
+              }
+              builder.setTableRegionStatesCount(tableRegionStatesCountMap);
+            } catch (IOException e) {
+              LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics..", e);
+            }
+          }
+          break;
+        }
       }
     }
     return builder.build();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 6e9f337..1512674 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.DoNotRetryRegionException;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
@@ -2056,4 +2057,41 @@ public class AssignmentManager {
     }
     return rsReportsSnapshot;
   }
+
+  /**
+   * Provide the region states count for a given table,
+   * e.g. how many regions of the given table are open/closed/in transition, etc.
+   *
+   * @param tableName TableName
+   * @return region states count
+   */
+  public RegionStatesCount getRegionStatesCount(TableName tableName) {
+    int openRegionsCount = 0;
+    int closedRegionCount = 0;
+    int ritCount = 0;
+    int splitRegionCount = 0;
+    int totalRegionCount = 0;
+    if (!isTableDisabled(tableName)) {
+      final List<RegionState> states = regionStates.getTableRegionStates(tableName);
+      for (RegionState regionState : states) {
+        if (regionState.isOpened()) {
+          openRegionsCount++;
+        } else if (regionState.isClosed()) {
+          closedRegionCount++;
+        } else if (regionState.isSplit()) {
+          splitRegionCount++;
+        }
+      }
+      totalRegionCount = states.size();
+      ritCount = totalRegionCount - openRegionsCount - splitRegionCount;
+    }
+    return new RegionStatesCount.RegionStatesCountBuilder()
+      .setOpenRegions(openRegionsCount)
+      .setClosedRegions(closedRegionCount)
+      .setSplitRegions(splitRegionCount)
+      .setRegionsInTransition(ritCount)
+      .setTotalRegions(totalRegionCount)
+      .build();
+  }
+
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
index 2ea03a6..cbba505 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -30,6 +30,9 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.AsyncAdmin;
 import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.junit.AfterClass;
@@ -60,6 +64,9 @@ public class TestClientClusterMetrics {
   private final static int MASTERS = 3;
   private static MiniHBaseCluster CLUSTER;
   private static HRegionServer DEAD;
+  private static final TableName TABLE_NAME = TableName.valueOf("test");
+  private static final byte[] CF = Bytes.toBytes("cf");
+
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -123,6 +130,11 @@ public class TestClientClusterMetrics {
         defaults.getLiveServerMetrics().size());
       Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
       Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
+      origin.getTableRegionStatesCount().forEach(((tableName, regionStatesCount) -> {
+        RegionStatesCount defaultRegionStatesCount = defaults.getTableRegionStatesCount()
+          .get(tableName);
+        Assert.assertEquals(defaultRegionStatesCount, regionStatesCount);
+      }));
     }
   }
 
@@ -168,6 +180,38 @@ public class TestClientClusterMetrics {
   }
 
   @Test
+  public void testRegionStatesCount() throws Exception {
+    Table table = UTIL.createTable(TABLE_NAME, CF);
+    table.put(new Put(Bytes.toBytes("k1"))
+      .addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("v1")));
+    table.put(new Put(Bytes.toBytes("k2"))
+      .addColumn(CF, Bytes.toBytes("q2"), Bytes.toBytes("v2")));
+    table.put(new Put(Bytes.toBytes("k3"))
+      .addColumn(CF, Bytes.toBytes("q3"), Bytes.toBytes("v3")));
+
+    ClusterMetrics metrics = ADMIN.getClusterMetrics();
+    Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 3);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getRegionsInTransition(), 0);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getOpenRegions(), 1);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getTotalRegions(), 1);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getClosedRegions(), 0);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+      .getSplitRegions(), 0);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+      .getRegionsInTransition(), 0);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+      .getOpenRegions(), 1);
+    Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+      .getTotalRegions(), 1);
+
+    UTIL.deleteTable(TABLE_NAME);
+  }
+
+  @Test
   public void testMasterAndBackupMastersStatus() throws Exception {
     // get all the master threads
     List<MasterThread> masterThreads = CLUSTER.getMasterThreads();
@@ -224,6 +268,17 @@ public class TestClientClusterMetrics {
     Assert.assertEquals(postCount + 1, MyObserver.POST_COUNT.get());
   }
 
+  private static void insertData(final TableName tableName, int startRow, int rowCount)
+      throws IOException {
+    Table t = UTIL.getConnection().getTable(tableName);
+    Put p;
+    for (int i = 0; i < rowCount; i++) {
+      p = new Put(Bytes.toBytes("" + (startRow + i)));
+      p.addColumn(CF, Bytes.toBytes("val1"), Bytes.toBytes(i));
+      t.put(p);
+    }
+  }
+
   public static class MyObserver implements MasterCoprocessor, MasterObserver {
     private static final AtomicInteger PRE_COUNT = new AtomicInteger(0);
     private static final AtomicInteger POST_COUNT = new AtomicInteger(0);
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 4c88291..b0c1bde 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -605,16 +605,21 @@ module Hbase
       # Table should exist
       raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
 
-      status = Pair.new
       begin
-        status = @admin.getAlterStatus(org.apache.hadoop.hbase.TableName.valueOf(table_name))
-        if status.getSecond != 0
-          puts "#{status.getSecond - status.getFirst}/#{status.getSecond} regions updated."
+        cluster_metrics = @admin.getClusterMetrics
+        table_region_status = cluster_metrics
+                              .getTableRegionStatesCount
+                              .get(org.apache.hadoop.hbase.TableName.valueOf(table_name))
+        if table_region_status.getTotalRegions != 0
+          updated_regions = table_region_status.getTotalRegions -
+                            table_region_status.getRegionsInTransition -
+                            table_region_status.getClosedRegions
+          puts "#{updated_regions}/#{table_region_status.getTotalRegions} regions updated."
         else
           puts 'All regions updated.'
         end
         sleep 1
-      end while !status.nil? && status.getFirst != 0
+      end while !table_region_status.nil? && table_region_status.getRegionsInTransition != 0
       puts 'Done.'
     end
 
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index a43e394..2013d17 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -110,6 +110,13 @@ module Hbase
 
     #-------------------------------------------------------------------------------
 
+    define_test 'alter_status should work' do
+      output = capture_stdout { command(:alter_status, @test_name) }
+      assert(output.include?('1/1 regions updated'))
+    end
+
+    #-------------------------------------------------------------------------------
+
     define_test "compact should work" do
       command(:compact, 'hbase:meta')
     end