Posted to commits@hbase.apache.org by ch...@apache.org on 2017/09/28 12:30:34 UTC

[04/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base
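
Every hunk in this message applies the same mechanical substitution: the deprecated HRegionInfo concrete class is replaced by the org.apache.hadoop.hbase.client.RegionInfo interface, and direct HRegionInfo constructor calls are replaced by RegionInfoBuilder. A minimal before/after sketch of that pattern, using only the builder methods that appear in the hunks (the class name and table name below are hypothetical, not part of the patch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;

    public class RegionInfoMigrationSketch {
      public static void main(String[] args) {
        // Old style, as removed by this patch:
        //   HRegionInfo hri = new HRegionInfo(
        //       TableName.valueOf("t1"), "key1".getBytes(), "key2".getBytes(), false, 100);
        // New style, as added by this patch:
        RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf("t1"))
            .setStartKey("key1".getBytes())
            .setEndKey("key2".getBytes())
            .setSplit(false)
            .setRegionId(100)
            .build();
        System.out.println(hri.getRegionNameAsString());
      }
    }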

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
index b360145..c33cd56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -28,15 +33,17 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
+
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -48,17 +55,14 @@ import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegi
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.net.DNSToSwitchMapping;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 
 @Category({MasterTests.class, MediumTests.class})
 public class TestBaseLoadBalancer extends BalancerTestBase {
@@ -108,13 +112,13 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
 
   public static class MockBalancer extends BaseLoadBalancer {
     @Override
-    public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
+    public List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState) {
       return null;
     }
 
     @Override
     public List<RegionPlan> balanceCluster(TableName tableName,
-        Map<ServerName, List<HRegionInfo>> clusterState) throws HBaseIOException {
+        Map<ServerName, List<RegionInfo>> clusterState) throws HBaseIOException {
       return null;
     }
   }
@@ -125,9 +129,9 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
    * @param servers
    * @param assignments
    */
-  private void assertImmediateAssignment(List<HRegionInfo> regions, List<ServerName> servers,
-      Map<HRegionInfo, ServerName> assignments) {
-    for (HRegionInfo region : regions) {
+  private void assertImmediateAssignment(List<RegionInfo> regions, List<ServerName> servers,
+      Map<RegionInfo, ServerName> assignments) {
+    for (RegionInfo region : regions) {
       assertTrue(assignments.containsKey(region));
     }
   }
@@ -143,31 +147,31 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
   @Test (timeout=180000)
   public void testBulkAssignment() throws Exception {
     List<ServerName> tmp = getListOfServerNames(randomServers(5, 0));
-    List<HRegionInfo> hris = randomRegions(20);
-    hris.add(HRegionInfo.FIRST_META_REGIONINFO);
+    List<RegionInfo> hris = randomRegions(20);
+    hris.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
     tmp.add(master);
-    Map<ServerName, List<HRegionInfo>> plans = loadBalancer.roundRobinAssignment(hris, tmp);
+    Map<ServerName, List<RegionInfo>> plans = loadBalancer.roundRobinAssignment(hris, tmp);
     if (LoadBalancer.isTablesOnMaster(loadBalancer.getConf())) {
-      assertTrue(plans.get(master).contains(HRegionInfo.FIRST_META_REGIONINFO));
+      assertTrue(plans.get(master).contains(RegionInfoBuilder.FIRST_META_REGIONINFO));
       assertEquals(1, plans.get(master).size());
     }
     int totalRegion = 0;
-    for (List<HRegionInfo> regions: plans.values()) {
+    for (List<RegionInfo> regions: plans.values()) {
       totalRegion += regions.size();
     }
     assertEquals(hris.size(), totalRegion);
     for (int[] mock : regionsAndServersMocks) {
       LOG.debug("testBulkAssignment with " + mock[0] + " regions and " + mock[1] + " servers");
-      List<HRegionInfo> regions = randomRegions(mock[0]);
+      List<RegionInfo> regions = randomRegions(mock[0]);
       List<ServerAndLoad> servers = randomServers(mock[1], 0);
       List<ServerName> list = getListOfServerNames(servers);
-      Map<ServerName, List<HRegionInfo>> assignments =
+      Map<ServerName, List<RegionInfo>> assignments =
           loadBalancer.roundRobinAssignment(regions, list);
       float average = (float) regions.size() / servers.size();
       int min = (int) Math.floor(average);
       int max = (int) Math.ceil(average);
       if (assignments != null && !assignments.isEmpty()) {
-        for (List<HRegionInfo> regionList : assignments.values()) {
+        for (List<RegionInfo> regionList : assignments.values()) {
           assertTrue(regionList.size() == min || regionList.size() == max);
         }
       }
@@ -185,8 +189,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
   public void testRetainAssignment() throws Exception {
     // Test simple case where all same servers are there
     List<ServerAndLoad> servers = randomServers(10, 10);
-    List<HRegionInfo> regions = randomRegions(100);
-    Map<HRegionInfo, ServerName> existing = new TreeMap<>();
+    List<RegionInfo> regions = randomRegions(100);
+    Map<RegionInfo, ServerName> existing = new TreeMap<>(RegionInfo.COMPARATOR);
     for (int i = 0; i < regions.size(); i++) {
       ServerName sn = servers.get(i % servers.size()).getServerName();
       // The old server would have had same host and port, but different
@@ -196,7 +200,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
       existing.put(regions.get(i), snWithOldStartCode);
     }
     List<ServerName> listOfServerNames = getListOfServerNames(servers);
-    Map<ServerName, List<HRegionInfo>> assignment =
+    Map<ServerName, List<RegionInfo>> assignment =
         loadBalancer.retainAssignment(existing, listOfServerNames);
     assertRetainedAssignment(existing, listOfServerNames, assignment);
 
@@ -236,7 +240,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     allServers.addAll(idleServers);
     LoadBalancer balancer = new MockBalancer() {
       @Override
-      public boolean shouldBeOnMaster(HRegionInfo region) {
+      public boolean shouldBeOnMaster(RegionInfo region) {
         return false;
       }
     };
@@ -249,9 +253,12 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     MasterServices services = Mockito.mock(MasterServices.class);
     Mockito.when(services.getServerManager()).thenReturn(sm);
     balancer.setMasterServices(services);
-    HRegionInfo hri1 = new HRegionInfo(
-        TableName.valueOf(name.getMethodName()), "key1".getBytes(), "key2".getBytes(),
-        false, 100);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+        .setStartKey("key1".getBytes())
+        .setEndKey("key2".getBytes())
+        .setSplit(false)
+        .setRegionId(100)
+        .build();
     assertNull(balancer.randomAssignment(hri1, Collections.EMPTY_LIST));
     assertNull(balancer.randomAssignment(hri1, null));
     for (int i = 0; i != 3; ++i) {
@@ -267,23 +274,29 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     // replica from one node to a specific other node or rack lowers the
     // availability of the region or not
 
-    List<HRegionInfo> list0 = new ArrayList<>();
-    List<HRegionInfo> list1 = new ArrayList<>();
-    List<HRegionInfo> list2 = new ArrayList<>();
+    List<RegionInfo> list0 = new ArrayList<>();
+    List<RegionInfo> list1 = new ArrayList<>();
+    List<RegionInfo> list2 = new ArrayList<>();
     // create a region (region1)
-    HRegionInfo hri1 = new HRegionInfo(
-        TableName.valueOf(name.getMethodName()), "key1".getBytes(), "key2".getBytes(),
-        false, 100);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+        .setStartKey("key1".getBytes())
+        .setEndKey("key2".getBytes())
+        .setSplit(false)
+        .setRegionId(100)
+        .build();
     // create a replica of the region (replica_of_region1)
-    HRegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
+    RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
     // create a second region (region2)
-    HRegionInfo hri3 = new HRegionInfo(
-        TableName.valueOf(name.getMethodName()), "key2".getBytes(), "key3".getBytes(),
-        false, 101);
+    RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+        .setStartKey("key2".getBytes())
+        .setEndKey("key3".getBytes())
+        .setSplit(false)
+        .setRegionId(101)
+        .build();
     list0.add(hri1); //only region1
     list1.add(hri2); //only replica_of_region1
     list2.add(hri3); //only region2
-    Map<ServerName, List<HRegionInfo>> clusterState = new LinkedHashMap<>();
+    Map<ServerName, List<RegionInfo>> clusterState = new LinkedHashMap<>();
     clusterState.put(servers[0], list0); //servers[0] hosts region1
     clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1
     clusterState.put(servers[2], list2); //servers[2] hosts region2
@@ -335,23 +348,29 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
 
   @Test (timeout=180000)
   public void testRegionAvailabilityWithRegionMoves() throws Exception {
-    List<HRegionInfo> list0 = new ArrayList<>();
-    List<HRegionInfo> list1 = new ArrayList<>();
-    List<HRegionInfo> list2 = new ArrayList<>();
+    List<RegionInfo> list0 = new ArrayList<>();
+    List<RegionInfo> list1 = new ArrayList<>();
+    List<RegionInfo> list2 = new ArrayList<>();
     // create a region (region1)
-    HRegionInfo hri1 = new HRegionInfo(
-        TableName.valueOf(name.getMethodName()), "key1".getBytes(), "key2".getBytes(),
-        false, 100);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+        .setStartKey("key1".getBytes())
+        .setEndKey("key2".getBytes())
+        .setSplit(false)
+        .setRegionId(100)
+        .build();
     // create a replica of the region (replica_of_region1)
-    HRegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
+    RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
     // create a second region (region2)
-    HRegionInfo hri3 = new HRegionInfo(
-        TableName.valueOf(name.getMethodName()), "key2".getBytes(), "key3".getBytes(),
-        false, 101);
+    RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+        .setStartKey("key2".getBytes())
+        .setEndKey("key3".getBytes())
+        .setSplit(false)
+        .setRegionId(101)
+        .build();
     list0.add(hri1); //only region1
     list1.add(hri2); //only replica_of_region1
     list2.add(hri3); //only region2
-    Map<ServerName, List<HRegionInfo>> clusterState = new LinkedHashMap<>();
+    Map<ServerName, List<RegionInfo>> clusterState = new LinkedHashMap<>();
     clusterState.put(servers[0], list0); //servers[0] hosts region1
     clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1
     clusterState.put(servers[2], list2); //servers[2] hosts region2
@@ -373,8 +392,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
 
     // start over again
     clusterState.clear();
-    List<HRegionInfo> list3 = new ArrayList<>();
-    HRegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1);
+    List<RegionInfo> list3 = new ArrayList<>();
+    RegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1);
     list3.add(hri4);
     clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1
     clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1
@@ -409,15 +428,15 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
    * @param servers
    * @param assignment
    */
-  private void assertRetainedAssignment(Map<HRegionInfo, ServerName> existing,
-      List<ServerName> servers, Map<ServerName, List<HRegionInfo>> assignment) {
+  private void assertRetainedAssignment(Map<RegionInfo, ServerName> existing,
+      List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignment) {
     // Verify condition 1, every region assigned, and to online server
     Set<ServerName> onlineServerSet = new TreeSet<>(servers);
-    Set<HRegionInfo> assignedRegions = new TreeSet<>();
-    for (Map.Entry<ServerName, List<HRegionInfo>> a : assignment.entrySet()) {
+    Set<RegionInfo> assignedRegions = new TreeSet<>(RegionInfo.COMPARATOR);
+    for (Map.Entry<ServerName, List<RegionInfo>> a : assignment.entrySet()) {
       assertTrue("Region assigned to server that was not listed as online",
         onlineServerSet.contains(a.getKey()));
-      for (HRegionInfo r : a.getValue())
+      for (RegionInfo r : a.getValue())
         assignedRegions.add(r);
     }
     assertEquals(existing.size(), assignedRegions.size());
@@ -428,9 +447,9 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
       onlineHostNames.add(s.getHostname());
     }
 
-    for (Map.Entry<ServerName, List<HRegionInfo>> a : assignment.entrySet()) {
+    for (Map.Entry<ServerName, List<RegionInfo>> a : assignment.entrySet()) {
       ServerName assignedTo = a.getKey();
-      for (HRegionInfo r : a.getValue()) {
+      for (RegionInfo r : a.getValue()) {
         ServerName address = existing.get(r);
         if (address != null && onlineHostNames.contains(address.getHostname())) {
           // this region was prevously assigned somewhere, and that
@@ -447,8 +466,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     // tests whether the BaseLoadBalancer.Cluster can be constructed with servers
     // sharing same host and port
     List<ServerName> servers = getListOfServerNames(randomServers(10, 10));
-    List<HRegionInfo> regions = randomRegions(101);
-    Map<ServerName, List<HRegionInfo>> clusterState = new HashMap<>();
+    List<RegionInfo> regions = randomRegions(101);
+    Map<ServerName, List<RegionInfo>> clusterState = new HashMap<>();
 
     assignRegions(regions, servers, clusterState);
 
@@ -468,11 +487,11 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     assertEquals(10, cluster.numServers); // only 10 servers because they share the same host + port
   }
 
-  private void assignRegions(List<HRegionInfo> regions, List<ServerName> servers,
-      Map<ServerName, List<HRegionInfo>> clusterState) {
+  private void assignRegions(List<RegionInfo> regions, List<ServerName> servers,
+      Map<ServerName, List<RegionInfo>> clusterState) {
     for (int i = 0; i < regions.size(); i++) {
       ServerName sn = servers.get(i % servers.size());
-      List<HRegionInfo> regionsOfServer = clusterState.get(sn);
+      List<RegionInfo> regionsOfServer = clusterState.get(sn);
       if (regionsOfServer == null) {
         regionsOfServer = new ArrayList<>(10);
         clusterState.put(sn, regionsOfServer);
@@ -486,8 +505,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
   public void testClusterRegionLocations() {
     // tests whether region locations are handled correctly in Cluster
     List<ServerName> servers = getListOfServerNames(randomServers(10, 10));
-    List<HRegionInfo> regions = randomRegions(101);
-    Map<ServerName, List<HRegionInfo>> clusterState = new HashMap<>();
+    List<RegionInfo> regions = randomRegions(101);
+    Map<ServerName, List<RegionInfo>> clusterState = new HashMap<>();
 
     assignRegions(regions, servers, clusterState);
 

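One detail worth calling out from the TestBaseLoadBalancer hunks above: sorted collections keyed on regions now pass RegionInfo.COMPARATOR explicitly (the TreeMap and TreeSet changes), which suggests the RegionInfo interface, unlike the old HRegionInfo class, is not Comparable on its own. A minimal sketch under that assumption (class name hypothetical):

    import java.util.TreeMap;
    import java.util.TreeSet;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class SortedRegionCollectionsSketch {
      public static void main(String[] args) {
        // HRegionInfo implemented Comparable, so new TreeSet<>() was enough.
        // With the RegionInfo interface, the comparator must be supplied up front,
        // otherwise the first insert fails with a ClassCastException (assumption
        // based on the hunks above; not stated in the patch itself).
        TreeSet<RegionInfo> assignedRegions = new TreeSet<>(RegionInfo.COMPARATOR);
        TreeMap<RegionInfo, ServerName> existing = new TreeMap<>(RegionInfo.COMPARATOR);
        System.out.println(assignedRegions.size() + " regions, " + existing.size() + " mappings");
      }
    }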
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java
index 610ecf7..4d09bf8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
+import static org.junit.Assert.assertTrue;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -27,15 +29,14 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -43,9 +44,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 /**
  * Test the load balancer that is created by default.
  */
@@ -129,16 +127,16 @@ public class TestDefaultLoadBalancer extends BalancerTestBase {
    */
   @Test (timeout=60000)
   public void testBalanceClusterOverall() throws Exception {
-    Map<TableName, Map<ServerName, List<HRegionInfo>>> clusterLoad = new TreeMap<>();
+    Map<TableName, Map<ServerName, List<RegionInfo>>> clusterLoad = new TreeMap<>();
     for (int[] mockCluster : clusterStateMocks) {
-      Map<ServerName, List<HRegionInfo>> clusterServers = mockClusterServers(mockCluster, 50);
+      Map<ServerName, List<RegionInfo>> clusterServers = mockClusterServers(mockCluster, 50);
       List<ServerAndLoad> clusterList = convertToList(clusterServers);
       clusterLoad.put(TableName.valueOf(name.getMethodName()), clusterServers);
-      HashMap<TableName, TreeMap<ServerName, List<HRegionInfo>>> result = mockClusterServersWithTables(clusterServers);
+      HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> result = mockClusterServersWithTables(clusterServers);
       loadBalancer.setClusterLoad(clusterLoad);
       List<RegionPlan> clusterplans = new ArrayList<>();
       List<Pair<TableName, Integer>> regionAmountList = new ArrayList<>();
-      for(TreeMap<ServerName, List<HRegionInfo>> servers : result.values()){
+      for(TreeMap<ServerName, List<RegionInfo>> servers : result.values()){
         List<ServerAndLoad> list = convertToList(servers);
         LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
         List<RegionPlan> partialplans = loadBalancer.balanceCluster(servers);
@@ -146,7 +144,7 @@ public class TestDefaultLoadBalancer extends BalancerTestBase {
         List<ServerAndLoad> balancedClusterPerTable = reconcile(list, partialplans, servers);
         LOG.info("Mock Balance : " + printMock(balancedClusterPerTable));
         assertClusterAsBalanced(balancedClusterPerTable);
-        for (Map.Entry<ServerName, List<HRegionInfo>> entry : servers.entrySet()) {
+        for (Map.Entry<ServerName, List<RegionInfo>> entry : servers.entrySet()) {
           returnRegions(entry.getValue());
           returnServer(entry.getKey());
         }
@@ -167,16 +165,16 @@ public class TestDefaultLoadBalancer extends BalancerTestBase {
    */
   @Test (timeout=60000)
   public void testImpactOfBalanceClusterOverall() throws Exception {
-    Map<TableName, Map<ServerName, List<HRegionInfo>>> clusterLoad = new TreeMap<>();
-    Map<ServerName, List<HRegionInfo>> clusterServers = mockUniformClusterServers(mockUniformCluster);
+    Map<TableName, Map<ServerName, List<RegionInfo>>> clusterLoad = new TreeMap<>();
+    Map<ServerName, List<RegionInfo>> clusterServers = mockUniformClusterServers(mockUniformCluster);
     List<ServerAndLoad> clusterList = convertToList(clusterServers);
     clusterLoad.put(TableName.valueOf(name.getMethodName()), clusterServers);
     // use overall can achieve both table and cluster level balance
-    HashMap<TableName, TreeMap<ServerName, List<HRegionInfo>>> result1 = mockClusterServersWithTables(clusterServers);
+    HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> result1 = mockClusterServersWithTables(clusterServers);
     loadBalancer.setClusterLoad(clusterLoad);
     List<RegionPlan> clusterplans1 = new ArrayList<RegionPlan>();
     List<Pair<TableName, Integer>> regionAmountList = new ArrayList<Pair<TableName, Integer>>();
-    for(TreeMap<ServerName, List<HRegionInfo>> servers : result1.values()){
+    for(TreeMap<ServerName, List<RegionInfo>> servers : result1.values()){
       List<ServerAndLoad> list = convertToList(servers);
       LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
       List<RegionPlan> partialplans = loadBalancer.balanceCluster(servers);
@@ -184,7 +182,7 @@ public class TestDefaultLoadBalancer extends BalancerTestBase {
       List<ServerAndLoad> balancedClusterPerTable = reconcile(list, partialplans, servers);
       LOG.info("Mock Balance : " + printMock(balancedClusterPerTable));
       assertClusterAsBalanced(balancedClusterPerTable);
-      for (Map.Entry<ServerName, List<HRegionInfo>> entry : servers.entrySet()) {
+      for (Map.Entry<ServerName, List<RegionInfo>> entry : servers.entrySet()) {
         returnRegions(entry.getValue());
         returnServer(entry.getKey());
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java
index 8f2e893..e636cb0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java
@@ -29,26 +29,25 @@ import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.master.RackManager;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.Before;
@@ -58,6 +57,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 
 @Category(LargeTests.class)
 public class TestFavoredStochasticBalancerPickers extends BalancerTestBase {
@@ -116,7 +116,7 @@ public class TestFavoredStochasticBalancerPickers extends BalancerTestBase {
     ServerName source = getRSWithMaxRegions(Lists.newArrayList(masterServerName, mostLoadedServer));
     assertNotNull(source);
     int regionsToMove = admin.getOnlineRegions(source).size()/2;
-    List<HRegionInfo> hris = admin.getOnlineRegions(source);
+    List<RegionInfo> hris = admin.getRegions(source);
     for (int i = 0; i < regionsToMove; i++) {
       admin.move(hris.get(i).getEncodedNameAsBytes(), Bytes.toBytes(mostLoadedServer.getServerName()));
       LOG.info("Moving region: " + hris.get(i).getRegionNameAsString() + " to " + mostLoadedServer);
@@ -132,11 +132,11 @@ public class TestFavoredStochasticBalancerPickers extends BalancerTestBase {
     });
     TEST_UTIL.getHBaseCluster().startRegionServerAndWait(60000);
 
-    Map<ServerName, List<HRegionInfo>> serverAssignments = Maps.newHashMap();
+    Map<ServerName, List<RegionInfo>> serverAssignments = Maps.newHashMap();
     ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
     for (ServerName sn : status.getServers()) {
       if (!ServerName.isSameAddress(sn, masterServerName)) {
-        serverAssignments.put(sn, admin.getOnlineRegions(sn));
+        serverAssignments.put(sn, admin.getRegions(sn));
       }
     }
     RegionLocationFinder regionFinder = new RegionLocationFinder();
@@ -165,7 +165,7 @@ public class TestFavoredStochasticBalancerPickers extends BalancerTestBase {
         Cluster.Action action = loadPicker.generate(cluster);
         if (action.type == Cluster.Action.Type.MOVE_REGION) {
           Cluster.MoveRegionAction moveRegionAction = (Cluster.MoveRegionAction) action;
-          HRegionInfo region = cluster.regions[moveRegionAction.region];
+          RegionInfo region = cluster.regions[moveRegionAction.region];
           assertNotEquals(-1, moveRegionAction.toServer);
           ServerName destinationServer = cluster.servers[moveRegionAction.toServer];
           assertEquals(cluster.servers[moveRegionAction.fromServer], mostLoadedServer);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
index 4cf3fb6..2f22eeb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
@@ -34,27 +34,27 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
+import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.junit.After;
@@ -126,13 +126,13 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     assertTrue("Balancer did not run", admin.balancer());
     TEST_UTIL.waitUntilNoRegionsInTransition(120000);
 
-    List<HRegionInfo> hris = admin.getOnlineRegions(rs1.getRegionServer().getServerName());
-    for (HRegionInfo hri : hris) {
+    List<RegionInfo> hris = admin.getRegions(rs1.getRegionServer().getServerName());
+    for (RegionInfo hri : hris) {
       assertFalse("New RS contains regions belonging to table: " + tableName,
         hri.getTable().equals(tableName));
     }
-    hris = admin.getOnlineRegions(rs2.getRegionServer().getServerName());
-    for (HRegionInfo hri : hris) {
+    hris = admin.getRegions(rs2.getRegionServer().getServerName());
+    for (RegionInfo hri : hris) {
       assertFalse("New RS contains regions belonging to table: " + tableName,
         hri.getTable().equals(tableName));
     }
@@ -150,13 +150,13 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     admin.flush(tableName);
 
     LoadBalancer balancer = master.getLoadBalancer();
-    List<HRegionInfo> regions = admin.getTableRegions(tableName);
+    List<RegionInfo> regions = admin.getRegions(tableName);
     regions.addAll(admin.getTableRegions(TableName.META_TABLE_NAME));
     regions.addAll(admin.getTableRegions(TableName.NAMESPACE_TABLE_NAME));
     List<ServerName> servers = Lists.newArrayList(
       admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers());
-    Map<ServerName, List<HRegionInfo>> map = balancer.roundRobinAssignment(regions, servers);
-    for (List<HRegionInfo> regionInfos : map.values()) {
+    Map<ServerName, List<RegionInfo>> map = balancer.roundRobinAssignment(regions, servers);
+    for (List<RegionInfo> regionInfos : map.values()) {
       regions.removeAll(regionInfos);
     }
     assertEquals("No region should be missed by balancer", 0, regions.size());
@@ -173,8 +173,8 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     TEST_UTIL.waitTableAvailable(desc.getTableName());
 
     FavoredNodesManager fnm = master.getFavoredNodesManager();
-    List<HRegionInfo> regionsOfTable = admin.getTableRegions(TableName.valueOf(tableName));
-    for (HRegionInfo rInfo : regionsOfTable) {
+    List<RegionInfo> regionsOfTable = admin.getRegions(TableName.valueOf(tableName));
+    for (RegionInfo rInfo : regionsOfTable) {
       Set<ServerName> favNodes = Sets.newHashSet(fnm.getFavoredNodes(rInfo));
       assertNotNull(favNodes);
       assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favNodes.size());
@@ -212,7 +212,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     admin.createTable(desc);
     TEST_UTIL.waitTableAvailable(desc.getTableName());
 
-    HRegionInfo hri = admin.getTableRegions(TableName.valueOf(tableName)).get(0);
+    RegionInfo hri = admin.getTableRegions(TableName.valueOf(tableName)).get(0);
 
     FavoredNodesManager fnm = master.getFavoredNodesManager();
     fnm.deleteFavoredNodesForRegions(Lists.newArrayList(hri));
@@ -242,7 +242,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     admin.createTable(desc, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM);
     TEST_UTIL.waitTableAvailable(tableName);
 
-    final HRegionInfo region = admin.getTableRegions(tableName).get(0);
+    final RegionInfo region = admin.getTableRegions(tableName).get(0);
     LOG.info("Region thats supposed to be in transition: " + region);
     FavoredNodesManager fnm = master.getFavoredNodesManager();
     List<ServerName> currentFN = fnm.getFavoredNodes(region);
@@ -280,7 +280,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     admin.createTable(desc, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM);
     TEST_UTIL.waitTableAvailable(tableName);
 
-    final HRegionInfo misplacedRegion = admin.getTableRegions(tableName).get(0);
+    final RegionInfo misplacedRegion = admin.getTableRegions(tableName).get(0);
     FavoredNodesManager fnm = master.getFavoredNodesManager();
     List<ServerName> currentFN = fnm.getFavoredNodes(misplacedRegion);
     assertNotNull(currentFN);
@@ -297,7 +297,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     List<ServerName> newFavoredNodes = helper.generateFavoredNodes(misplacedRegion);
     assertNotNull(newFavoredNodes);
     assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, newFavoredNodes.size());
-    Map<HRegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
+    Map<RegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
     regionFNMap.put(misplacedRegion, newFavoredNodes);
     fnm.updateFavoredNodes(regionFNMap);
 
@@ -326,7 +326,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     admin.createTable(desc, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM);
     TEST_UTIL.waitTableAvailable(tableName);
 
-    final HRegionInfo region = admin.getTableRegions(tableName).get(0);
+    final RegionInfo region = admin.getTableRegions(tableName).get(0);
     LOG.info("Region that's supposed to be in transition: " + region);
     FavoredNodesManager fnm = master.getFavoredNodesManager();
     List<ServerName> currentFN = fnm.getFavoredNodes(region);
@@ -364,7 +364,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     admin.createTable(desc, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM);
     TEST_UTIL.waitTableAvailable(tableName);
 
-    final HRegionInfo region = admin.getTableRegions(tableName).get(0);
+    final RegionInfo region = admin.getTableRegions(tableName).get(0);
     LOG.info("Region that's supposed to be in transition: " + region);
     FavoredNodesManager fnm = master.getFavoredNodesManager();
     List<ServerName> currentFN = fnm.getFavoredNodes(region);
@@ -394,13 +394,13 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     helper.initialize();
 
     for (RegionStateNode regionState: regionStates.getRegionsInTransition()) {
-      HRegionInfo regionInfo = regionState.getRegionInfo();
+      RegionInfo regionInfo = regionState.getRegionInfo();
       List<ServerName> newFavoredNodes = helper.generateFavoredNodes(regionInfo);
       assertNotNull(newFavoredNodes);
       assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, newFavoredNodes.size());
       LOG.info("Region: " + regionInfo.getEncodedName() + " FN: " + newFavoredNodes);
 
-      Map<HRegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
+      Map<RegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
       regionFNMap.put(regionInfo, newFavoredNodes);
       fnm.updateFavoredNodes(regionFNMap);
       LOG.info("Assigning region: " + regionInfo.getEncodedName());
@@ -425,7 +425,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     admin.createTable(desc, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM);
     TEST_UTIL.waitTableAvailable(tableName);
 
-    final HRegionInfo region = admin.getTableRegions(tableName).get(0);
+    final RegionInfo region = admin.getTableRegions(tableName).get(0);
     LOG.info("Region that's supposed to be in transition: " + region);
     FavoredNodesManager fnm = master.getFavoredNodesManager();
     List<ServerName> currentFN = fnm.getFavoredNodes(region);
@@ -446,9 +446,9 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     assertTrue("Region: " + region + " should be RIT",
         regionStatesBeforeMaster.getRegionState(region).isFailedOpen());
 
-    List<HRegionInfo> rit = Lists.newArrayList();
+    List<RegionInfo> rit = Lists.newArrayList();
     for (RegionStateNode regionState: regionStatesBeforeMaster.getRegionsInTransition()) {
-      HRegionInfo regionInfo = regionState.getRegionInfo();
+      RegionInfo regionInfo = regionState.getRegionInfo();
       LOG.debug("Region in transition after stopping FN's: " + regionInfo);
       rit.add(regionInfo);
       assertTrue("Region: " + regionInfo + " should be RIT",
@@ -473,7 +473,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     assertTrue("Region: " + region + " should be RIT",
         regionStates.getRegionState(region).isFailedOpen());
 
-    for (HRegionInfo regionInfo : rit) {
+    for (RegionInfo regionInfo : rit) {
       assertTrue("Region: " + regionInfo + " should be RIT",
           regionStates.getRegionState(regionInfo).isFailedOpen());
     }
@@ -487,13 +487,13 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(serversForNewFN, conf);
     helper.initialize();
 
-    for (HRegionInfo regionInfo : rit) {
+    for (RegionInfo regionInfo : rit) {
       List<ServerName> newFavoredNodes = helper.generateFavoredNodes(regionInfo);
       assertNotNull(newFavoredNodes);
       assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, newFavoredNodes.size());
       LOG.info("Region: " + regionInfo.getEncodedName() + " FN: " + newFavoredNodes);
 
-      Map<HRegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
+      Map<RegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
       regionFNMap.put(regionInfo, newFavoredNodes);
       fnm.updateFavoredNodes(regionFNMap);
       LOG.info("Assigning region: " + regionInfo.getEncodedName());
@@ -511,7 +511,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
 
   private void checkFavoredNodeAssignments(TableName tableName, FavoredNodesManager fnm,
       RegionStates regionStates) throws IOException {
-    for (HRegionInfo hri : admin.getTableRegions(tableName)) {
+    for (RegionInfo hri : admin.getTableRegions(tableName)) {
       ServerName host = regionStates.getRegionServerOfRegion(hri);
       assertNotNull("Region: " + hri.getEncodedName() + " not on FN, current: " + host
               + " FN list: " + fnm.getFavoredNodes(hri),

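The two favored-node test diffs above also swap the Admin calls: getOnlineRegions(ServerName) and getTableRegions(TableName), which returned List<HRegionInfo>, become getRegions(ServerName) and getRegions(TableName), which return List<RegionInfo>. A hedged usage sketch of those two overloads (class name, table name, and the local cluster connection are hypothetical):

    import java.util.EnumSet;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.ClusterStatus.Option;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class AdminGetRegionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Per-table regions (replaces getTableRegions(TableName)):
          List<RegionInfo> tableRegions = admin.getRegions(TableName.valueOf("t1"));
          System.out.println("t1 has " + tableRegions.size() + " regions");
          // Per-server regions (replaces getOnlineRegions(ServerName)):
          ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
          for (ServerName sn : status.getServers()) {
            List<RegionInfo> serverRegions = admin.getRegions(sn);
            System.out.println(sn + " hosts " + serverRegions.size() + " regions");
          }
        }
      }
    }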
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java
index b96dcb5..010f57a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java
@@ -26,10 +26,10 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -84,7 +84,7 @@ public class TestRegionLocationFinder {
     for (int i = 0; i < ServerNum; i++) {
       HRegionServer server = cluster.getRegionServer(i);
       for (Region region : server.getRegions(tableName)) {
-        // get region's hdfs block distribution by region and RegionLocationFinder, 
+        // get region's hdfs block distribution by region and RegionLocationFinder,
         // they should have same result
         HDFSBlocksDistribution blocksDistribution1 = region.getHDFSBlocksDistribution();
         HDFSBlocksDistribution blocksDistribution2 = finder.getBlockDistribution(region
@@ -151,12 +151,12 @@ public class TestRegionLocationFinder {
       if (regions.size() <= 0) {
         continue;
       }
-      List<HRegionInfo> regionInfos = new ArrayList<>(regions.size());
+      List<RegionInfo> regionInfos = new ArrayList<>(regions.size());
       for (Region region : regions) {
         regionInfos.add(region.getRegionInfo());
       }
       finder.refreshAndWait(regionInfos);
-      for (HRegionInfo regionInfo : regionInfos) {
+      for (RegionInfo regionInfo : regionInfos) {
         assertNotNull(finder.getCache().getIfPresent(regionInfo));
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
index b97567d..e547f87 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
@@ -39,10 +39,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.MockNoopMasterServices;
 import org.apache.hadoop.hbase.master.RackManager;
@@ -159,7 +159,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
     loadBalancer.setConf(conf);
     for (int[] mockCluster : clusterStateMocks) {
-      Map<ServerName, List<HRegionInfo>> servers = mockClusterServers(mockCluster);
+      Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster);
       List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
       assertNull(plans);
     }
@@ -183,7 +183,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
     loadBalancer.setConf(conf);
     for (int[] mockCluster : clusterStateMocks) {
-      Map<ServerName, List<HRegionInfo>> servers = mockClusterServers(mockCluster);
+      Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster);
       List<ServerAndLoad> list = convertToList(servers);
       LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
       List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
@@ -192,7 +192,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
       assertClusterAsBalanced(balancedCluster);
       List<RegionPlan> secondPlans =  loadBalancer.balanceCluster(servers);
       assertNull(secondPlans);
-      for (Map.Entry<ServerName, List<HRegionInfo>> entry : servers.entrySet()) {
+      for (Map.Entry<ServerName, List<RegionInfo>> entry : servers.entrySet()) {
         returnRegions(entry.getValue());
         returnServer(entry.getKey());
       }
@@ -375,7 +375,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int replication = 1;
     int numTables = 2;
 
-    Map<ServerName, List<HRegionInfo>> serverMap =
+    Map<ServerName, List<RegionInfo>> serverMap =
         createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
     List<ServerAndLoad> list = convertToList(serverMap);
 
@@ -419,7 +419,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
         costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf);
 
     int [] servers = new int[] {3,3,3,3,3};
-    TreeMap<ServerName, List<HRegionInfo>> clusterState = mockClusterServers(servers);
+    TreeMap<ServerName, List<RegionInfo>> clusterState = mockClusterServers(servers);
 
     BaseLoadBalancer.Cluster cluster;
 
@@ -429,7 +429,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     assertEquals(0, costWithoutReplicas, 0);
 
     // replicate the region from first server to the last server
-    HRegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(
+    RegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(
       clusterState.firstEntry().getValue().get(0),1);
     clusterState.lastEntry().getValue().add(replica1);
 
@@ -440,7 +440,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     assertEquals(0, costWith1ReplicaDifferentServer, 0);
 
     // add a third replica to the last server
-    HRegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(replica1, 2);
+    RegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(replica1, 2);
     clusterState.lastEntry().getValue().add(replica2);
 
     cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
@@ -451,14 +451,14 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
 
     // test with replication = 4 for following:
 
-    HRegionInfo replica3;
-    Iterator<Entry<ServerName, List<HRegionInfo>>> it;
-    Entry<ServerName, List<HRegionInfo>> entry;
+    RegionInfo replica3;
+    Iterator<Entry<ServerName, List<RegionInfo>>> it;
+    Entry<ServerName, List<RegionInfo>> entry;
 
     clusterState = mockClusterServers(servers);
     it = clusterState.entrySet().iterator();
     entry = it.next(); //first server
-    HRegionInfo hri = entry.getValue().get(0);
+    RegionInfo hri = entry.getValue().get(0);
     replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1);
     replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2);
     replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3);
@@ -491,10 +491,10 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
   public void testNeedsBalanceForColocatedReplicas() {
     // check for the case where there are two hosts and with one rack, and where
     // both the replicas are hosted on the same server
-    List<HRegionInfo> regions = randomRegions(1);
+    List<RegionInfo> regions = randomRegions(1);
     ServerName s1 = ServerName.valueOf("host1", 1000, 11111);
     ServerName s2 = ServerName.valueOf("host11", 1000, 11111);
-    Map<ServerName, List<HRegionInfo>> map = new HashMap<>();
+    Map<ServerName, List<RegionInfo>> map = new HashMap<>();
     map.put(s1, regions);
     regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
     // until the step above s1 holds two replicas of a region
@@ -505,7 +505,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     // and both the replicas are on the same rack
     map.clear();
     regions = randomRegions(1);
-    List<HRegionInfo> regionsOnS2 = new ArrayList<>(1);
+    List<RegionInfo> regionsOnS2 = new ArrayList<>(1);
     regionsOnS2.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
     map.put(s1, regions);
     map.put(s2, regionsOnS2);
@@ -615,13 +615,13 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int replication = 3; // 3 replicas per region
     int numRegionsPerServer = 5;
     int numTables = 10;
-    Map<ServerName, List<HRegionInfo>> serverMap =
+    Map<ServerName, List<RegionInfo>> serverMap =
         createServerMap(numHosts, numRegions, numRegionsPerServer, replication, numTables);
     int numNodesPerHost = 4;
 
     // create a new map with 4 RS per host.
-    Map<ServerName, List<HRegionInfo>> newServerMap = new TreeMap<>(serverMap);
-    for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
+    Map<ServerName, List<RegionInfo>> newServerMap = new TreeMap<>(serverMap);
+    for (Map.Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
       for (int i=1; i < numNodesPerHost; i++) {
         ServerName s1 = entry.getKey();
         ServerName s2 = ServerName.valueOf(s1.getHostname(), s1.getPort() + i, 1); // create an RS for the same host
@@ -662,7 +662,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int numRegionsPerServer = 28;
     int numTables = 10;
     int numRacks = 4; // all replicas should be on a different rack
-    Map<ServerName, List<HRegionInfo>> serverMap =
+    Map<ServerName, List<RegionInfo>> serverMap =
         createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
     RackManager rm = new ForTestRackManager(numRacks);
 

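The TestStochasticLoadBalancer hunks above keep deriving replica regions with RegionReplicaUtil; only the static type changes from HRegionInfo to RegionInfo. A minimal sketch, assuming getRegionInfoForReplica accepts and returns the interface type as these hunks suggest (class and table names hypothetical):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;

    public class ReplicaRegionInfoSketch {
      public static void main(String[] args) {
        RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf("t1"))
            .setStartKey("aaa".getBytes())
            .setEndKey("zzz".getBytes())
            .build();
        // Replicas share the primary's table, keys and region id; only the
        // replica id differs.
        RegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
        RegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(primary, 2);
        System.out.println(replica1.getReplicaId() + " / " + replica2.getReplicaId());
      }
    }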
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
index e338849..ce02395 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.locking.LockServiceClient;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
@@ -46,12 +46,6 @@ import org.apache.hadoop.hbase.procedure2.LockType;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.hamcrest.core.IsInstanceOf;
@@ -67,6 +61,13 @@ import org.junit.rules.ExpectedException;
 import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
+
 @Category({MasterTests.class, SmallTests.class})
 public class TestLockProcedure {
   @Rule
@@ -87,9 +88,9 @@ public class TestLockProcedure {
 
   private static String namespace = "namespace";
   private static TableName tableName1 = TableName.valueOf(namespace, "table1");
-  private static List<HRegionInfo> tableRegions1;
+  private static List<RegionInfo> tableRegions1;
   private static TableName tableName2 = TableName.valueOf(namespace, "table2");
-  private static List<HRegionInfo> tableRegions2;
+  private static List<RegionInfo> tableRegions2;
 
   private String testMethodName;
 
@@ -109,8 +110,8 @@ public class TestLockProcedure {
     UTIL.createTable(tableName2, new byte[][]{"fam".getBytes()}, new byte[][] {"1".getBytes()});
     masterRpcService = UTIL.getHBaseCluster().getMaster().getMasterRpcServices();
     procExec = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
-    tableRegions1 = UTIL.getAdmin().getTableRegions(tableName1);
-    tableRegions2 = UTIL.getAdmin().getTableRegions(tableName2);
+    tableRegions1 = UTIL.getAdmin().getRegions(tableName1);
+    tableRegions2 = UTIL.getAdmin().getRegions(tableName2);
     assert tableRegions1.size() > 0;
     assert tableRegions2.size() > 0;
   }
@@ -151,7 +152,7 @@ public class TestLockProcedure {
         null, tableName, null, description, HConstants.NO_NONCE, HConstants.NO_NONCE);
   }
 
-  private LockRequest getRegionLock(List<HRegionInfo> regionInfos, String description) {
+  private LockRequest getRegionLock(List<RegionInfo> regionInfos, String description) {
     return LockServiceClient.buildLockRequest(LockServiceProtos.LockType.EXCLUSIVE,
         null, null, regionInfos, description, HConstants.NO_NONCE, HConstants.NO_NONCE);
   }
@@ -178,7 +179,7 @@ public class TestLockProcedure {
 
   @Test
   public void testLockRequestValidationRegionsFromDifferentTable() throws Exception {
-    List<HRegionInfo> regions = new ArrayList<>();
+    List<RegionInfo> regions = new ArrayList<>();
     regions.addAll(tableRegions1);
     regions.addAll(tableRegions2);
     validateLockRequestException(getRegionLock(regions, "desc"),

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
index e25a61f..ab6d7d0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
@@ -32,16 +32,13 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -52,6 +49,11 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
 
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
+
 /**
  * Tests logic of {@link SimpleRegionNormalizer}.
  */
@@ -76,10 +78,10 @@ public class TestSimpleRegionNormalizer {
   @Test
   public void testNoNormalizationForMetaTable() throws HBaseIOException {
     TableName testTable = TableName.META_TABLE_NAME;
-    List<HRegionInfo> hris = new ArrayList<>();
+    List<RegionInfo> RegionInfo = new ArrayList<>();
     Map<byte[], Integer> regionSizes = new HashMap<>();
 
-    setupMocksForNormalizer(regionSizes, hris);
+    setupMocksForNormalizer(regionSizes, RegionInfo);
     List<NormalizationPlan> plans = normalizer.computePlanForTable(testTable);
     assertTrue(plans == null);
   }
@@ -87,18 +89,23 @@ public class TestSimpleRegionNormalizer {
   @Test
   public void testNoNormalizationIfTooFewRegions() throws HBaseIOException {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    List<HRegionInfo> hris = new ArrayList<>();
+    List<RegionInfo> RegionInfo = new ArrayList<>();
     Map<byte[], Integer> regionSizes = new HashMap<>();
-
-    HRegionInfo hri1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
-    hris.add(hri1);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("aaa"))
+        .setEndKey(Bytes.toBytes("bbb"))
+        .build();
+    RegionInfo.add(hri1);
     regionSizes.put(hri1.getRegionName(), 10);
 
-    HRegionInfo hri2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
-    hris.add(hri2);
+    RegionInfo hri2 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("bbb"))
+        .setEndKey(Bytes.toBytes("ccc"))
+        .build();
+    RegionInfo.add(hri2);
     regionSizes.put(hri2.getRegionName(), 15);
 
-    setupMocksForNormalizer(regionSizes, hris);
+    setupMocksForNormalizer(regionSizes, RegionInfo);
     List<NormalizationPlan> plans = normalizer.computePlanForTable(tableName);
     assertTrue(plans == null);
   }
@@ -106,26 +113,37 @@ public class TestSimpleRegionNormalizer {
   @Test
   public void testNoNormalizationOnNormalizedCluster() throws HBaseIOException {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    List<HRegionInfo> hris = new ArrayList<>();
+    List<RegionInfo> RegionInfo = new ArrayList<>();
     Map<byte[], Integer> regionSizes = new HashMap<>();
 
-    HRegionInfo hri1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
-    hris.add(hri1);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("aaa"))
+        .setEndKey(Bytes.toBytes("bbb"))
+        .build();
+    RegionInfo.add(hri1);
     regionSizes.put(hri1.getRegionName(), 10);
 
-    HRegionInfo hri2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
-    hris.add(hri2);
+    RegionInfo hri2 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("bbb"))
+        .setEndKey(Bytes.toBytes("ccc"))
+        .build();
+    RegionInfo.add(hri2);
     regionSizes.put(hri2.getRegionName(), 15);
 
-    HRegionInfo hri3 = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
-    hris.add(hri3);
+    RegionInfo hri3 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ccc"))
+        .setEndKey(Bytes.toBytes("ddd"))
+        .build();
+    RegionInfo.add(hri3);
     regionSizes.put(hri3.getRegionName(), 8);
 
-    HRegionInfo hri4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
-    hris.add(hri4);
+    RegionInfo hri4 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ddd"))
+        .setEndKey(Bytes.toBytes("eee"))
+        .build();
+    RegionInfo.add(hri4);
     regionSizes.put(hri4.getRegionName(), 10);
 
-    setupMocksForNormalizer(regionSizes, hris);
+    setupMocksForNormalizer(regionSizes, RegionInfo);
     List<NormalizationPlan> plans = normalizer.computePlanForTable(tableName);
     assertTrue(plans == null);
   }
@@ -133,30 +151,45 @@ public class TestSimpleRegionNormalizer {
   @Test
   public void testMergeOfSmallRegions() throws HBaseIOException {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    List<HRegionInfo> hris = new ArrayList<>();
+    List<RegionInfo> RegionInfo = new ArrayList<>();
     Map<byte[], Integer> regionSizes = new HashMap<>();
 
-    HRegionInfo hri1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
-    hris.add(hri1);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("aaa"))
+        .setEndKey(Bytes.toBytes("bbb"))
+        .build();
+    RegionInfo.add(hri1);
     regionSizes.put(hri1.getRegionName(), 15);
 
-    HRegionInfo hri2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
-    hris.add(hri2);
+    RegionInfo hri2 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("bbb"))
+        .setEndKey(Bytes.toBytes("ccc"))
+        .build();
+    RegionInfo.add(hri2);
     regionSizes.put(hri2.getRegionName(), 5);
 
-    HRegionInfo hri3 = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
-    hris.add(hri3);
+    RegionInfo hri3 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ccc"))
+        .setEndKey(Bytes.toBytes("ddd"))
+        .build();
+    RegionInfo.add(hri3);
     regionSizes.put(hri3.getRegionName(), 5);
 
-    HRegionInfo hri4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
-    hris.add(hri4);
+    RegionInfo hri4 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ddd"))
+        .setEndKey(Bytes.toBytes("eee"))
+        .build();
+    RegionInfo.add(hri4);
     regionSizes.put(hri4.getRegionName(), 15);
 
-    HRegionInfo hri5 = new HRegionInfo(tableName, Bytes.toBytes("eee"), Bytes.toBytes("fff"));
-    hris.add(hri5);
+    RegionInfo hri5 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("eee"))
+        .setEndKey(Bytes.toBytes("fff"))
+        .build();
+    RegionInfo.add(hri5);
     regionSizes.put(hri5.getRegionName(), 16);
 
-    setupMocksForNormalizer(regionSizes, hris);
+    setupMocksForNormalizer(regionSizes, RegionInfo);
     List<NormalizationPlan> plans = normalizer.computePlanForTable(tableName);
 
     NormalizationPlan plan = plans.get(0);
@@ -169,34 +202,52 @@ public class TestSimpleRegionNormalizer {
   @Test
   public void testMergeOfSecondSmallestRegions() throws HBaseIOException {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    List<HRegionInfo> hris = new ArrayList<>();
+    List<RegionInfo> RegionInfo = new ArrayList<>();
     Map<byte[], Integer> regionSizes = new HashMap<>();
 
-    HRegionInfo hri1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
-    hris.add(hri1);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("aaa"))
+        .setEndKey(Bytes.toBytes("bbb"))
+        .build();
+    RegionInfo.add(hri1);
     regionSizes.put(hri1.getRegionName(), 1);
 
-    HRegionInfo hri2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
-    hris.add(hri2);
+    RegionInfo hri2 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("bbb"))
+        .setEndKey(Bytes.toBytes("ccc"))
+        .build();
+    RegionInfo.add(hri2);
     regionSizes.put(hri2.getRegionName(), 10000);
 
-    HRegionInfo hri3 = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
-    hris.add(hri3);
+    RegionInfo hri3 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ccc"))
+        .setEndKey(Bytes.toBytes("ddd"))
+        .build();
+    RegionInfo.add(hri3);
     regionSizes.put(hri3.getRegionName(), 10000);
 
-    HRegionInfo hri4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
-    hris.add(hri4);
+    RegionInfo hri4 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ddd"))
+        .setEndKey(Bytes.toBytes("eee"))
+        .build();
+    RegionInfo.add(hri4);
     regionSizes.put(hri4.getRegionName(), 10000);
 
-    HRegionInfo hri5 = new HRegionInfo(tableName, Bytes.toBytes("eee"), Bytes.toBytes("fff"));
-    hris.add(hri5);
+    RegionInfo hri5 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("eee"))
+        .setEndKey(Bytes.toBytes("fff"))
+        .build();
+    RegionInfo.add(hri5);
     regionSizes.put(hri5.getRegionName(), 2700);
 
-    HRegionInfo hri6 = new HRegionInfo(tableName, Bytes.toBytes("fff"), Bytes.toBytes("ggg"));
-    hris.add(hri6);
+    RegionInfo hri6 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("fff"))
+        .setEndKey(Bytes.toBytes("ggg"))
+        .build();
+    RegionInfo.add(hri6);
     regionSizes.put(hri6.getRegionName(), 2700);
 
-    setupMocksForNormalizer(regionSizes, hris);
+    setupMocksForNormalizer(regionSizes, RegionInfo);
     List<NormalizationPlan> plans = normalizer.computePlanForTable(tableName);
     NormalizationPlan plan = plans.get(0);
 
@@ -208,30 +259,45 @@ public class TestSimpleRegionNormalizer {
   @Test
   public void testMergeOfSmallNonAdjacentRegions() throws HBaseIOException {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    List<HRegionInfo> hris = new ArrayList<>();
+    List<RegionInfo> RegionInfo = new ArrayList<>();
     Map<byte[], Integer> regionSizes = new HashMap<>();
 
-    HRegionInfo hri1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
-    hris.add(hri1);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("aaa"))
+        .setEndKey(Bytes.toBytes("bbb"))
+        .build();
+    RegionInfo.add(hri1);
     regionSizes.put(hri1.getRegionName(), 15);
 
-    HRegionInfo hri2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
-    hris.add(hri2);
+    RegionInfo hri2 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("bbb"))
+        .setEndKey(Bytes.toBytes("ccc"))
+        .build();
+    RegionInfo.add(hri2);
     regionSizes.put(hri2.getRegionName(), 5);
 
-    HRegionInfo hri3 = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
-    hris.add(hri3);
+    RegionInfo hri3 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ccc"))
+        .setEndKey(Bytes.toBytes("ddd"))
+        .build();
+    RegionInfo.add(hri3);
     regionSizes.put(hri3.getRegionName(), 16);
 
-    HRegionInfo hri4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
-    hris.add(hri4);
+    RegionInfo hri4 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ddd"))
+        .setEndKey(Bytes.toBytes("eee"))
+        .build();
+    RegionInfo.add(hri4);
     regionSizes.put(hri4.getRegionName(), 15);
 
-    HRegionInfo hri5 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
-    hris.add(hri4);
+    RegionInfo hri5 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ddd"))
+        .setEndKey(Bytes.toBytes("eee"))
+        .build();
+    RegionInfo.add(hri4);
     regionSizes.put(hri5.getRegionName(), 5);
 
-    setupMocksForNormalizer(regionSizes, hris);
+    setupMocksForNormalizer(regionSizes, RegionInfo);
     List<NormalizationPlan> plans = normalizer.computePlanForTable(tableName);
 
     assertTrue(plans == null);
@@ -240,26 +306,38 @@ public class TestSimpleRegionNormalizer {
   @Test
   public void testSplitOfLargeRegion() throws HBaseIOException {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    List<HRegionInfo> hris = new ArrayList<>();
+    List<RegionInfo> RegionInfo = new ArrayList<>();
     Map<byte[], Integer> regionSizes = new HashMap<>();
 
-    HRegionInfo hri1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
-    hris.add(hri1);
+    RegionInfo hri1 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("aaa"))
+        .setEndKey(Bytes.toBytes("bbb"))
+        .build();
+    RegionInfo.add(hri1);
     regionSizes.put(hri1.getRegionName(), 8);
 
-    HRegionInfo hri2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
-    hris.add(hri2);
+    RegionInfo hri2 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("bbb"))
+        .setEndKey(Bytes.toBytes("ccc"))
+        .build();
+    RegionInfo.add(hri2);
     regionSizes.put(hri2.getRegionName(), 6);
 
-    HRegionInfo hri3 = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
-    hris.add(hri3);
+    RegionInfo hri3 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ccc"))
+        .setEndKey(Bytes.toBytes("ddd"))
+        .build();
+    RegionInfo.add(hri3);
     regionSizes.put(hri3.getRegionName(), 10);
 
-    HRegionInfo hri4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
-    hris.add(hri4);
+    RegionInfo hri4 = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(Bytes.toBytes("ddd"))
+        .setEndKey(Bytes.toBytes("eee"))
+        .build();
+    RegionInfo.add(hri4);
     regionSizes.put(hri4.getRegionName(), 30);
 
-    setupMocksForNormalizer(regionSizes, hris);
+    setupMocksForNormalizer(regionSizes, RegionInfo);
     List<NormalizationPlan> plans = normalizer.computePlanForTable(tableName);
     NormalizationPlan plan = plans.get(0);
 
@@ -268,16 +346,16 @@ public class TestSimpleRegionNormalizer {
   }
 
   protected void setupMocksForNormalizer(Map<byte[], Integer> regionSizes,
-                                         List<HRegionInfo> hris) {
+                                         List<RegionInfo> RegionInfo) {
     masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS);
     masterRpcServices = Mockito.mock(MasterRpcServices.class, RETURNS_DEEP_STUBS);
 
     // for simplicity all regions are assumed to be on one server; doesn't matter to us
     ServerName sn = ServerName.valueOf("localhost", 0, 1L);
     when(masterServices.getAssignmentManager().getRegionStates().
-      getRegionsOfTable(any(TableName.class))).thenReturn(hris);
+      getRegionsOfTable(any(TableName.class))).thenReturn(RegionInfo);
     when(masterServices.getAssignmentManager().getRegionStates().
-      getRegionServerOfRegion(any(HRegionInfo.class))).thenReturn(sn);
+      getRegionServerOfRegion(any(RegionInfo.class))).thenReturn(sn);
 
     for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) {
       RegionLoad regionLoad = Mockito.mock(RegionLoad.class);
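The recurring change in this test is the constructor-to-builder move: new HRegionInfo(table, startKey, endKey) becomes an immutable RegionInfo assembled through RegionInfoBuilder. A minimal sketch of that pattern follows; the wrapper class and method names are illustrative, not part of the patch.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class RegionInfoBuilderSketch {
  private RegionInfoBuilderSketch() {}

  // Equivalent of the removed new HRegionInfo(table, startKey, endKey):
  // build an immutable RegionInfo for the [start, end) key range.
  static RegionInfo range(TableName table, String start, String end) {
    return RegionInfoBuilder.newBuilder(table)
        .setStartKey(Bytes.toBytes(start))
        .setEndKey(Bytes.toBytes(end))
        .build();
  }
}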

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
index d733d2b..8fe53af 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
@@ -18,6 +18,13 @@
  */
 package org.apache.hadoop.hbase.master.normalizer;
 
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -29,6 +36,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.TableNamespaceManager;
@@ -48,13 +56,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-
 /**
  * Testing {@link SimpleRegionNormalizer} on minicluster.
  */
@@ -112,13 +113,7 @@ public class TestSimpleRegionNormalizerOnCluster {
     try (Table ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILYNAME, 5)) {
       // Need to get sorted list of regions here
       List<HRegion> generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(TABLENAME);
-      Collections.sort(generatedRegions, new Comparator<HRegion>() {
-        @Override
-        public int compare(HRegion o1, HRegion o2) {
-          return o1.getRegionInfo().compareTo(o2.getRegionInfo());
-        }
-      });
-
+      Collections.sort(generatedRegions, Comparator.comparing(HRegion::getRegionInfo, RegionInfo.COMPARATOR));
       HRegion region = generatedRegions.get(0);
       generateTestData(region, 1);
       region.flush(true);
@@ -189,12 +184,7 @@ public class TestSimpleRegionNormalizerOnCluster {
     try (Table ht = TEST_UTIL.createMultiRegionTable(tableName, FAMILYNAME, 5)) {
       // Need to get sorted list of regions here
       List<HRegion> generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
-      Collections.sort(generatedRegions, new Comparator<HRegion>() {
-        @Override
-        public int compare(HRegion o1, HRegion o2) {
-          return o1.getRegionInfo().compareTo(o2.getRegionInfo());
-        }
-      });
+      Collections.sort(generatedRegions, Comparator.comparing(HRegion::getRegionInfo, RegionInfo.COMPARATOR));
 
       HRegion region = generatedRegions.get(0);
       generateTestData(region, 1);
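Both sorts above replace an anonymous Comparator<HRegion> with a key-extracting comparator over the RegionInfo interface. A minimal sketch, assuming a list of HRegion instances from the mini cluster; the helper class is illustrative, not part of the patch.

import java.util.Comparator;
import java.util.List;

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegion;

public final class RegionSortSketch {
  private RegionSortSketch() {}

  // Sort regions by their RegionInfo using the interface-level comparator,
  // replacing the hand-written Comparator<HRegion> removed above.
  static void sortByRegionInfo(List<HRegion> regions) {
    regions.sort(Comparator.comparing(HRegion::getRegionInfo, RegionInfo.COMPARATOR));
  }
}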

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 1ca8518..b87c343 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -33,20 +33,19 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
@@ -63,6 +62,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class MasterProcedureTestingUtility {
@@ -148,10 +148,10 @@ public class MasterProcedureTestingUtility {
     return builder.build();
   }
 
-  public static HRegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
+  public static RegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
       final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
     TableDescriptor htd = createHTD(tableName, family);
-    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+    RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys);
     long procId = ProcedureTestingUtility.submitAndWait(procExec,
       new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
     ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
@@ -159,12 +159,12 @@ public class MasterProcedureTestingUtility {
   }
 
   public static void validateTableCreation(final HMaster master, final TableName tableName,
-      final HRegionInfo[] regions, String... family) throws IOException {
+      final RegionInfo[] regions, String... family) throws IOException {
     validateTableCreation(master, tableName, regions, true, family);
   }
 
   public static void validateTableCreation(final HMaster master, final TableName tableName,
-      final HRegionInfo[] regions, boolean hasFamilyDirs, String... family) throws IOException {
+      final RegionInfo[] regions, boolean hasFamilyDirs, String... family) throws IOException {
     // check filesystem
     final FileSystem fs = master.getMasterFileSystem().getFileSystem();
     final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
@@ -230,7 +230,7 @@ public class MasterProcedureTestingUtility {
       public boolean visit(Result rowResult) throws IOException {
         RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
         if (list == null) {
-          LOG.warn("No serialized HRegionInfo in " + rowResult);
+          LOG.warn("No serialized RegionInfo in " + rowResult);
           return true;
         }
         HRegionLocation l = list.getRegionLocation();
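A minimal sketch of the renamed helper used by createTable above: ModifyRegionUtils.createHRegionInfos(htd, splitKeys) becomes createRegionInfos and now yields RegionInfo[]. The wrapper class below is illustrative, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

public final class CreateRegionInfosSketch {
  private CreateRegionInfosSketch() {}

  // Build the RegionInfo[] that CreateTableProcedure is constructed with,
  // using the test utility's descriptor factory and the renamed helper.
  static RegionInfo[] regionsFor(TableName table, byte[][] splitKeys) throws IOException {
    TableDescriptor htd = MasterProcedureTestingUtility.createHTD(table, "f1");
    return ModifyRegionUtils.createRegionInfos(htd, splitKeys);
  }
}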

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 687f953..c57f210 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -18,13 +18,16 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -39,9 +42,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 @Category({MasterTests.class, MediumTests.class})
 public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
   private static final Log LOG = LogFactory.getLog(TestCreateTableProcedure.class);
@@ -68,7 +68,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
   }
 
   private void testSimpleCreate(final TableName tableName, byte[][] splitKeys) throws Exception {
-    HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+    RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
       getMasterProcedureExecutor(), tableName, splitKeys, F1, F2);
     MasterProcedureTestingUtility.validateTableCreation(getMaster(), tableName, regions, F1, F2);
   }
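The same change seen from the caller's side: a minimal sketch of creating a table through the procedure executor and validating it, now in terms of RegionInfo[]. The wrapper method is illustrative, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public final class CreateAndValidateSketch {
  private CreateAndValidateSketch() {}

  // Create a table via CreateTableProcedure and check its layout on disk and
  // in meta; both helpers now speak RegionInfo[] instead of HRegionInfo[].
  static void createAndValidate(ProcedureExecutor<MasterProcedureEnv> procExec,
      HMaster master, TableName tableName, byte[][] splitKeys) throws IOException {
    RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
        procExec, tableName, splitKeys, "f1", "f2");
    MasterProcedureTestingUtility.validateTableCreation(master, tableName, regions, "f1", "f2");
  }
}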
@@ -83,7 +83,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
     // disable sanity check
     builder.setValue("hbase.table.sanity.checks", Boolean.FALSE.toString());
     TableDescriptor htd = builder.build();
-    final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
+    final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null);
 
     long procId =
         ProcedureTestingUtility.submitAndWait(procExec,
@@ -100,7 +100,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
     final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
-    final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
+    final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null);
 
     // create the table
     long procId1 = procExec.submitProcedure(
@@ -129,7 +129,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
     // Start the Create procedure && kill the executor
     byte[][] splitKeys = null;
     TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
-    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+    RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys);
     long procId = procExec.submitProcedure(
       new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
 
@@ -166,7 +166,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
     };
     builder.setRegionReplication(3);
     TableDescriptor htd = builder.build();
-    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+    RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys);
     long procId = procExec.submitProcedure(
       new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
index d126251..9747da6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
@@ -23,9 +23,9 @@ import static org.junit.Assert.assertTrue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -193,7 +193,7 @@ public class TestDeleteColumnFamilyProcedure extends TestTableDDLProcedureBase {
     final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
 
     // create the table
-    HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+    RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
       procExec, tableName, null, "f1", "f2", "f3", cf5);
     ProcedureTestingUtility.waitNoProcedureRunning(procExec);
     ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);