You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ch...@apache.org on 2017/09/28 12:30:46 UTC

[16/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index 169e42f..3f1373f 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -18,13 +18,6 @@
 
 package org.apache.hadoop.hbase.rsgroup;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.LinkedListMultimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -43,10 +36,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -54,6 +46,14 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.LinkedListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 
 /**
  * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721)
@@ -106,31 +106,31 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   @Override
-  public List<RegionPlan> balanceCluster(TableName tableName, Map<ServerName, List<HRegionInfo>>
+  public List<RegionPlan> balanceCluster(TableName tableName, Map<ServerName, List<RegionInfo>>
       clusterState) throws HBaseIOException {
     return balanceCluster(clusterState);
   }
 
   @Override
-  public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState)
+  public List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)
       throws HBaseIOException {
     if (!isOnline()) {
       throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME +
           " is not online, unable to perform balance");
     }
 
-    Map<ServerName,List<HRegionInfo>> correctedState = correctAssignments(clusterState);
+    Map<ServerName,List<RegionInfo>> correctedState = correctAssignments(clusterState);
     List<RegionPlan> regionPlans = new ArrayList<>();
 
-    List<HRegionInfo> misplacedRegions = correctedState.get(LoadBalancer.BOGUS_SERVER_NAME);
-    for (HRegionInfo regionInfo : misplacedRegions) {
+    List<RegionInfo> misplacedRegions = correctedState.get(LoadBalancer.BOGUS_SERVER_NAME);
+    for (RegionInfo regionInfo : misplacedRegions) {
       regionPlans.add(new RegionPlan(regionInfo, null, null));
     }
     try {
       List<RSGroupInfo> rsgi = rsGroupInfoManager.listRSGroups();
       for (RSGroupInfo info: rsgi) {
-        Map<ServerName, List<HRegionInfo>> groupClusterState = new HashMap<>();
-        Map<TableName, Map<ServerName, List<HRegionInfo>>> groupClusterLoad = new HashMap<>();
+        Map<ServerName, List<RegionInfo>> groupClusterState = new HashMap<>();
+        Map<TableName, Map<ServerName, List<RegionInfo>>> groupClusterLoad = new HashMap<>();
         for (Address sName : info.getServers()) {
           for(ServerName curr: clusterState.keySet()) {
             if(curr.getAddress().equals(sName)) {
@@ -154,15 +154,15 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   @Override
-  public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(
-      List<HRegionInfo> regions, List<ServerName> servers) throws HBaseIOException {
-    Map<ServerName, List<HRegionInfo>> assignments = Maps.newHashMap();
-    ListMultimap<String,HRegionInfo> regionMap = ArrayListMultimap.create();
+  public Map<ServerName, List<RegionInfo>> roundRobinAssignment(
+      List<RegionInfo> regions, List<ServerName> servers) throws HBaseIOException {
+    Map<ServerName, List<RegionInfo>> assignments = Maps.newHashMap();
+    ListMultimap<String,RegionInfo> regionMap = ArrayListMultimap.create();
     ListMultimap<String,ServerName> serverMap = ArrayListMultimap.create();
     generateGroupMaps(regions, servers, regionMap, serverMap);
     for(String groupKey : regionMap.keySet()) {
       if (regionMap.get(groupKey).size() > 0) {
-        Map<ServerName, List<HRegionInfo>> result =
+        Map<ServerName, List<RegionInfo>> result =
             this.internalBalancer.roundRobinAssignment(
                 regionMap.get(groupKey),
                 serverMap.get(groupKey));
@@ -181,13 +181,13 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   @Override
-  public Map<ServerName, List<HRegionInfo>> retainAssignment(
-      Map<HRegionInfo, ServerName> regions, List<ServerName> servers) throws HBaseIOException {
+  public Map<ServerName, List<RegionInfo>> retainAssignment(
+      Map<RegionInfo, ServerName> regions, List<ServerName> servers) throws HBaseIOException {
     try {
-      Map<ServerName, List<HRegionInfo>> assignments = new TreeMap<>();
-      ListMultimap<String, HRegionInfo> groupToRegion = ArrayListMultimap.create();
-      Set<HRegionInfo> misplacedRegions = getMisplacedRegions(regions);
-      for (HRegionInfo region : regions.keySet()) {
+      Map<ServerName, List<RegionInfo>> assignments = new TreeMap<>();
+      ListMultimap<String, RegionInfo> groupToRegion = ArrayListMultimap.create();
+      Set<RegionInfo> misplacedRegions = getMisplacedRegions(regions);
+      for (RegionInfo region : regions.keySet()) {
         if (!misplacedRegions.contains(region)) {
           String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
           groupToRegion.put(groupName, region);
@@ -196,11 +196,11 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
       // Now the "groupToRegion" map has only the regions which have correct
       // assignments.
       for (String key : groupToRegion.keySet()) {
-        Map<HRegionInfo, ServerName> currentAssignmentMap = new TreeMap<HRegionInfo, ServerName>();
-        List<HRegionInfo> regionList = groupToRegion.get(key);
+        Map<RegionInfo, ServerName> currentAssignmentMap = new TreeMap<RegionInfo, ServerName>();
+        List<RegionInfo> regionList = groupToRegion.get(key);
         RSGroupInfo info = rsGroupInfoManager.getRSGroup(key);
         List<ServerName> candidateList = filterOfflineServers(info, servers);
-        for (HRegionInfo region : regionList) {
+        for (RegionInfo region : regionList) {
           currentAssignmentMap.put(region, regions.get(region));
         }
         if(candidateList.size() > 0) {
@@ -209,7 +209,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
         }
       }
 
-      for (HRegionInfo region : misplacedRegions) {
+      for (RegionInfo region : misplacedRegions) {
         String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());;
         RSGroupInfo info = rsGroupInfoManager.getRSGroup(groupName);
         List<ServerName> candidateList = filterOfflineServers(info, servers);
@@ -235,9 +235,9 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   @Override
-  public ServerName randomAssignment(HRegionInfo region,
+  public ServerName randomAssignment(RegionInfo region,
       List<ServerName> servers) throws HBaseIOException {
-    ListMultimap<String,HRegionInfo> regionMap = LinkedListMultimap.create();
+    ListMultimap<String,RegionInfo> regionMap = LinkedListMultimap.create();
     ListMultimap<String,ServerName> serverMap = LinkedListMultimap.create();
     generateGroupMaps(Lists.newArrayList(region), servers, regionMap, serverMap);
     List<ServerName> filteredServers = serverMap.get(regionMap.keySet().iterator().next());
@@ -245,12 +245,12 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   private void generateGroupMaps(
-    List<HRegionInfo> regions,
+    List<RegionInfo> regions,
     List<ServerName> servers,
-    ListMultimap<String, HRegionInfo> regionMap,
+    ListMultimap<String, RegionInfo> regionMap,
     ListMultimap<String, ServerName> serverMap) throws HBaseIOException {
     try {
-      for (HRegionInfo region : regions) {
+      for (RegionInfo region : regions) {
         String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
         if (groupName == null) {
           LOG.warn("Group for table "+region.getTable()+" is null");
@@ -301,11 +301,11 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
     return finalList;
   }
 
-  private Set<HRegionInfo> getMisplacedRegions(
-      Map<HRegionInfo, ServerName> regions) throws IOException {
-    Set<HRegionInfo> misplacedRegions = new HashSet<>();
-    for(Map.Entry<HRegionInfo, ServerName> region : regions.entrySet()) {
-      HRegionInfo regionInfo = region.getKey();
+  private Set<RegionInfo> getMisplacedRegions(
+      Map<RegionInfo, ServerName> regions) throws IOException {
+    Set<RegionInfo> misplacedRegions = new HashSet<>();
+    for(Map.Entry<RegionInfo, ServerName> region : regions.entrySet()) {
+      RegionInfo regionInfo = region.getKey();
       ServerName assignedServer = region.getValue();
       RSGroupInfo info = rsGroupInfoManager.getRSGroup(rsGroupInfoManager.
               getRSGroupOfTable(regionInfo.getTable()));
@@ -323,17 +323,17 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
     return misplacedRegions;
   }
 
-  private Map<ServerName, List<HRegionInfo>> correctAssignments(
-       Map<ServerName, List<HRegionInfo>> existingAssignments)
+  private Map<ServerName, List<RegionInfo>> correctAssignments(
+       Map<ServerName, List<RegionInfo>> existingAssignments)
   throws HBaseIOException{
-    Map<ServerName, List<HRegionInfo>> correctAssignments = new TreeMap<>();
-    List<HRegionInfo> misplacedRegions = new LinkedList<>();
+    Map<ServerName, List<RegionInfo>> correctAssignments = new TreeMap<>();
+    List<RegionInfo> misplacedRegions = new LinkedList<>();
     correctAssignments.put(LoadBalancer.BOGUS_SERVER_NAME, new LinkedList<>());
-    for (Map.Entry<ServerName, List<HRegionInfo>> assignments : existingAssignments.entrySet()){
+    for (Map.Entry<ServerName, List<RegionInfo>> assignments : existingAssignments.entrySet()){
       ServerName sName = assignments.getKey();
       correctAssignments.put(sName, new LinkedList<>());
-      List<HRegionInfo> regions = assignments.getValue();
-      for (HRegionInfo region : regions) {
+      List<RegionInfo> regions = assignments.getValue();
+      for (RegionInfo region : regions) {
         RSGroupInfo info = null;
         try {
           info = rsGroupInfoManager.getRSGroup(
@@ -352,7 +352,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
 
     //TODO bulk unassign?
     //unassign misplaced regions, so that they are assigned to correct groups.
-    for(HRegionInfo info: misplacedRegions) {
+    for(RegionInfo info: misplacedRegions) {
       try {
         this.masterServices.getAssignmentManager().unassign(info);
       } catch (IOException e) {
@@ -395,15 +395,15 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
   }
 
   @Override
-  public void setClusterLoad(Map<TableName, Map<ServerName, List<HRegionInfo>>> clusterLoad) {
+  public void setClusterLoad(Map<TableName, Map<ServerName, List<RegionInfo>>> clusterLoad) {
   }
 
   @Override
-  public void regionOnline(HRegionInfo regionInfo, ServerName sn) {
+  public void regionOnline(RegionInfo regionInfo, ServerName sn) {
   }
 
   @Override
-  public void regionOffline(HRegionInfo regionInfo) {
+  public void regionOffline(RegionInfo regionInfo) {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 4a3747d..e116f58 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -42,19 +42,18 @@ import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -74,16 +73,18 @@ import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+
 import com.google.protobuf.ServiceException;
 
 /**
@@ -639,8 +640,8 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     }
 
     private boolean waitForGroupTableOnline() {
-      final List<HRegionInfo> foundRegions = new LinkedList<>();
-      final List<HRegionInfo> assignedRegions = new LinkedList<>();
+      final List<RegionInfo> foundRegions = new LinkedList<>();
+      final List<RegionInfo> assignedRegions = new LinkedList<>();
       final AtomicBoolean found = new AtomicBoolean(false);
       final TableStateManager tsm = masterServices.getTableStateManager();
       boolean createSent = false;
@@ -659,7 +660,7 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager {
             MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() {
               @Override
               public boolean visitInternal(Result row) throws IOException {
-                HRegionInfo info = MetaTableAccessor.getHRegionInfo(row);
+                RegionInfo info = MetaTableAccessor.getRegionInfo(row);
                 if (info != null) {
                   Cell serverCell =
                       row.getColumnLatestCell(HConstants.CATALOG_FAMILY,

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
index fe62d16..db7cf4d 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
@@ -17,27 +17,44 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
-import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
-import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.BeforeClass;
@@ -47,23 +64,8 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.security.SecureRandom;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 
 //TODO use stochastic based load balancer instead
 @Category(SmallTests.class)
@@ -114,7 +116,7 @@ public class TestRSGroupBasedLoadBalancer {
    */
   @Test
   public void testBalanceCluster() throws Exception {
-    Map<ServerName, List<HRegionInfo>> servers = mockClusterServers();
+    Map<ServerName, List<RegionInfo>> servers = mockClusterServers();
     ArrayListMultimap<String, ServerAndLoad> list = convertToGroupBasedMap(servers);
     LOG.info("Mock Cluster :  " + printStats(list));
     List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
@@ -169,11 +171,11 @@ public class TestRSGroupBasedLoadBalancer {
    * @throws java.io.IOException
    * @throws java.io.FileNotFoundException
    */
-  private void assertImmediateAssignment(List<HRegionInfo> regions,
+  private void assertImmediateAssignment(List<RegionInfo> regions,
                                          List<ServerName> servers,
-                                         Map<HRegionInfo, ServerName> assignments)
+                                         Map<RegionInfo, ServerName> assignments)
       throws IOException {
-    for (HRegionInfo region : regions) {
+    for (RegionInfo region : regions) {
       assertTrue(assignments.containsKey(region));
       ServerName server = assignments.get(region);
       TableName tableName = region.getTable();
@@ -197,8 +199,8 @@ public class TestRSGroupBasedLoadBalancer {
    */
   @Test
   public void testBulkAssignment() throws Exception {
-    List<HRegionInfo> regions = randomRegions(25);
-    Map<ServerName, List<HRegionInfo>> assignments = loadBalancer
+    List<RegionInfo> regions = randomRegions(25);
+    Map<ServerName, List<RegionInfo>> assignments = loadBalancer
         .roundRobinAssignment(regions, servers);
     //test empty region/servers scenario
     //this should not throw an NPE
@@ -207,8 +209,8 @@ public class TestRSGroupBasedLoadBalancer {
     //test regular scenario
     assertTrue(assignments.keySet().size() == servers.size());
     for (ServerName sn : assignments.keySet()) {
-      List<HRegionInfo> regionAssigned = assignments.get(sn);
-      for (HRegionInfo region : regionAssigned) {
+      List<RegionInfo> regionAssigned = assignments.get(sn);
+      for (RegionInfo region : regionAssigned) {
         TableName tableName = region.getTable();
         String groupName =
             getMockedGroupInfoManager().getRSGroupOfTable(tableName);
@@ -233,16 +235,16 @@ public class TestRSGroupBasedLoadBalancer {
   @Test
   public void testRetainAssignment() throws Exception {
     // Test simple case where all same servers are there
-    Map<ServerName, List<HRegionInfo>> currentAssignments = mockClusterServers();
-    Map<HRegionInfo, ServerName> inputForTest = new HashMap<>();
+    Map<ServerName, List<RegionInfo>> currentAssignments = mockClusterServers();
+    Map<RegionInfo, ServerName> inputForTest = new HashMap<>();
     for (ServerName sn : currentAssignments.keySet()) {
-      for (HRegionInfo region : currentAssignments.get(sn)) {
+      for (RegionInfo region : currentAssignments.get(sn)) {
         inputForTest.put(region, sn);
       }
     }
     //verify region->null server assignment is handled
     inputForTest.put(randomRegions(1).get(0), null);
-    Map<ServerName, List<HRegionInfo>> newAssignment = loadBalancer
+    Map<ServerName, List<RegionInfo>> newAssignment = loadBalancer
         .retainAssignment(inputForTest, servers);
     assertRetainedAssignment(inputForTest, servers, newAssignment);
   }
@@ -255,9 +257,9 @@ public class TestRSGroupBasedLoadBalancer {
   public void testRoundRobinAssignment() throws Exception {
     List<ServerName> onlineServers = new ArrayList<ServerName>(servers.size());
     onlineServers.addAll(servers);
-    List<HRegionInfo> regions = randomRegions(25);
+    List<RegionInfo> regions = randomRegions(25);
     int bogusRegion = 0;
-    for(HRegionInfo region : regions){
+    for(RegionInfo region : regions){
       String group = tableMap.get(region.getTable());
       if("dg3".equals(group) || "dg4".equals(group)){
         bogusRegion++;
@@ -273,7 +275,7 @@ public class TestRSGroupBasedLoadBalancer {
         it.remove();
       }
     }
-    Map<ServerName, List<HRegionInfo>> assignments = loadBalancer
+    Map<ServerName, List<RegionInfo>> assignments = loadBalancer
         .roundRobinAssignment(regions, onlineServers);
     assertEquals(bogusRegion, assignments.get(LoadBalancer.BOGUS_SERVER_NAME).size());
   }
@@ -294,17 +296,17 @@ public class TestRSGroupBasedLoadBalancer {
    * @throws java.io.FileNotFoundException
    */
   private void assertRetainedAssignment(
-      Map<HRegionInfo, ServerName> existing, List<ServerName> servers,
-      Map<ServerName, List<HRegionInfo>> assignment)
+      Map<RegionInfo, ServerName> existing, List<ServerName> servers,
+      Map<ServerName, List<RegionInfo>> assignment)
       throws FileNotFoundException, IOException {
     // Verify condition 1, every region assigned, and to online server
     Set<ServerName> onlineServerSet = new TreeSet<>(servers);
-    Set<HRegionInfo> assignedRegions = new TreeSet<>();
-    for (Map.Entry<ServerName, List<HRegionInfo>> a : assignment.entrySet()) {
+    Set<RegionInfo> assignedRegions = new TreeSet<>(RegionInfo.COMPARATOR);
+    for (Map.Entry<ServerName, List<RegionInfo>> a : assignment.entrySet()) {
       assertTrue(
           "Region assigned to server that was not listed as online",
           onlineServerSet.contains(a.getKey()));
-      for (HRegionInfo r : a.getValue())
+      for (RegionInfo r : a.getValue())
         assignedRegions.add(r);
     }
     assertEquals(existing.size(), assignedRegions.size());
@@ -315,9 +317,9 @@ public class TestRSGroupBasedLoadBalancer {
       onlineHostNames.add(s.getHostname());
     }
 
-    for (Map.Entry<ServerName, List<HRegionInfo>> a : assignment.entrySet()) {
+    for (Map.Entry<ServerName, List<RegionInfo>> a : assignment.entrySet()) {
       ServerName currentServer = a.getKey();
-      for (HRegionInfo r : a.getValue()) {
+      for (RegionInfo r : a.getValue()) {
         ServerName oldAssignedServer = existing.get(r);
         TableName tableName = r.getTable();
         String groupName =
@@ -374,7 +376,7 @@ public class TestRSGroupBasedLoadBalancer {
   }
 
   private ArrayListMultimap<String, ServerAndLoad> convertToGroupBasedMap(
-      final Map<ServerName, List<HRegionInfo>> serversMap) throws IOException {
+      final Map<ServerName, List<RegionInfo>> serversMap) throws IOException {
     ArrayListMultimap<String, ServerAndLoad> loadMap = ArrayListMultimap
         .create();
     for (RSGroupInfo gInfo : getMockedGroupInfoManager().listRSGroups()) {
@@ -387,7 +389,7 @@ public class TestRSGroupBasedLoadBalancer {
             break;
           }
         }
-        List<HRegionInfo> regions = serversMap.get(actual);
+        List<RegionInfo> regions = serversMap.get(actual);
         assertTrue("No load for " + actual, regions != null);
         loadMap.put(gInfo.getName(),
             new ServerAndLoad(actual, regions.size()));
@@ -434,12 +436,12 @@ public class TestRSGroupBasedLoadBalancer {
     }
   }
 
-  private Map<ServerName, List<HRegionInfo>> mockClusterServers() throws IOException {
+  private Map<ServerName, List<RegionInfo>> mockClusterServers() throws IOException {
     assertTrue(servers.size() == regionAssignment.length);
-    Map<ServerName, List<HRegionInfo>> assignment = new TreeMap<>();
+    Map<ServerName, List<RegionInfo>> assignment = new TreeMap<>();
     for (int i = 0; i < servers.size(); i++) {
       int numRegions = regionAssignment[i];
-      List<HRegionInfo> regions = assignedRegions(numRegions, servers.get(i));
+      List<RegionInfo> regions = assignedRegions(numRegions, servers.get(i));
       assignment.put(servers.get(i), regions);
     }
     return assignment;
@@ -449,10 +451,10 @@ public class TestRSGroupBasedLoadBalancer {
    * Generate a list of regions evenly distributed between the tables.
    *
    * @param numRegions The number of regions to be generated.
-   * @return List of HRegionInfo.
+   * @return List of RegionInfo.
    */
-  private List<HRegionInfo> randomRegions(int numRegions) {
-    List<HRegionInfo> regions = new ArrayList<>(numRegions);
+  private List<RegionInfo> randomRegions(int numRegions) {
+    List<RegionInfo> regions = new ArrayList<>(numRegions);
     byte[] start = new byte[16];
     byte[] end = new byte[16];
     rand.nextBytes(start);
@@ -462,9 +464,12 @@ public class TestRSGroupBasedLoadBalancer {
       Bytes.putInt(start, 0, numRegions << 1);
       Bytes.putInt(end, 0, (numRegions << 1) + 1);
       int tableIndex = (i + regionIdx) % tables.length;
-      HRegionInfo hri = new HRegionInfo(
-          tables[tableIndex], start, end, false, regionId++);
-      regions.add(hri);
+      regions.add(RegionInfoBuilder.newBuilder(tables[tableIndex])
+          .setStartKey(start)
+          .setEndKey(end)
+          .setSplit(false)
+          .setRegionId(regionId++)
+          .build());
     }
     return regions;
   }
@@ -477,18 +482,20 @@ public class TestRSGroupBasedLoadBalancer {
    * @return the list of regions
    * @throws java.io.IOException Signals that an I/O exception has occurred.
    */
-  private List<HRegionInfo> assignedRegions(int numRegions, ServerName sn) throws IOException {
-    List<HRegionInfo> regions = new ArrayList<>(numRegions);
+  private List<RegionInfo> assignedRegions(int numRegions, ServerName sn) throws IOException {
+    List<RegionInfo> regions = new ArrayList<>(numRegions);
     byte[] start = new byte[16];
     byte[] end = new byte[16];
     Bytes.putInt(start, 0, numRegions << 1);
     Bytes.putInt(end, 0, (numRegions << 1) + 1);
     for (int i = 0; i < numRegions; i++) {
       TableName tableName = getTableName(sn);
-      HRegionInfo hri = new HRegionInfo(
-          tableName, start, end, false,
-          regionId++);
-      regions.add(hri);
+      regions.add(RegionInfoBuilder.newBuilder(tableName)
+          .setStartKey(start)
+          .setEndKey(end)
+          .setSplit(false)
+          .setRegionId(regionId++)
+          .build());
     }
     return regions;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 6a5d68b..f0291fa 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -36,32 +36,32 @@ import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseCluster;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TestName;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-import org.junit.rules.TestName;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 
 public abstract class TestRSGroupsBase {
   protected static final Log LOG = LogFactory.getLog(TestRSGroupsBase.class);
@@ -166,7 +166,7 @@ public abstract class TestRSGroupsBase {
       for(RegionLoad rl : status.getLoad(serverName).getRegionsLoad().values()) {
         TableName tableName = null;
         try {
-          tableName = HRegionInfo.getTable(rl.getName());
+          tableName = RegionInfo.getTable(rl.getName());
         } catch (IllegalArgumentException e) {
           LOG.warn("Failed parse a table name from regionname=" +
               Bytes.toStringBinary(rl.getName()));
@@ -417,7 +417,7 @@ public abstract class TestRSGroupsBase {
     Map<ServerName,List<String>> assignMap =
         getTableServerRegionMap().get(tableName);
     final ServerName first = assignMap.entrySet().iterator().next().getKey();
-    for(HRegionInfo region: admin.getTableRegions(tableName)) {
+    for(RegionInfo region: admin.getTableRegions(tableName)) {
       if(!assignMap.get(first).contains(region)) {
         admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(first.getServerName()));
       }
@@ -514,7 +514,7 @@ public abstract class TestRSGroupsBase {
     });
 
     // Lets move this region to the new group.
-    TEST_UTIL.getAdmin().move(Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))),
+    TEST_UTIL.getAdmin().move(Bytes.toBytes(RegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))),
         Bytes.toBytes(targetServer.getServerName()));
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
@@ -587,7 +587,7 @@ public abstract class TestRSGroupsBase {
         appInfo.getServers().iterator().next().toString());
     AdminProtos.AdminService.BlockingInterface targetRS =
       ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
-    HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0);
+    RegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0);
     Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size());
 
     try {
@@ -728,7 +728,7 @@ public abstract class TestRSGroupsBase {
     //get server which is not a member of new group
     ServerName targetServer = null;
     for(ServerName server : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
-      if(!newGroup.containsServer(server.getAddress()) && 
+      if(!newGroup.containsServer(server.getAddress()) &&
            !rsGroupAdmin.getRSGroupInfo("master").containsServer(server.getAddress())) {
         targetServer = server;
         break;
@@ -780,7 +780,7 @@ public abstract class TestRSGroupsBase {
     List<String> regionList = getTableRegionMap().get(tableName);
     for(String region : regionList) {
       // Lets move this region to the targetServer
-      TEST_UTIL.getAdmin().move(Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(region))),
+      TEST_UTIL.getAdmin().move(Bytes.toBytes(RegionInfo.encodeRegionName(Bytes.toBytes(region))),
               Bytes.toBytes(targetServer.getServerName()));
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index bf3b623..292a668 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -30,9 +30,9 @@ AssignmentManager assignmentManager = null;
 </%args>
 <%import>
 java.util.*;
+org.apache.hadoop.hbase.client.RegionInfo;
 org.apache.hadoop.hbase.HBaseConfiguration;
 org.apache.hadoop.hbase.HConstants;
-org.apache.hadoop.hbase.HRegionInfo;
 org.apache.hadoop.hbase.HTableDescriptor;
 org.apache.hadoop.hbase.NamespaceDescriptor;
 org.apache.hadoop.hbase.ServerLoad;
@@ -435,7 +435,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
     <%for HTableDescriptor htDesc : tables%>
     <%java>
       TableName tableName = htDesc.getTableName();
-      Map<RegionState.State, List<HRegionInfo>> tableRegions =
+      Map<RegionState.State, List<RegionInfo>> tableRegions =
           master.getAssignmentManager().getRegionStates()
             .getRegionByStateOfTable(tableName);
       int openRegionsCount = tableRegions.get(RegionState.State.OPEN).size();
@@ -444,7 +444,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
       int failedRegionsCount = tableRegions.get(RegionState.State.FAILED_OPEN).size()
              + tableRegions.get(RegionState.State.FAILED_CLOSE).size();
       int otherRegionsCount = 0;
-      for (List<HRegionInfo> list: tableRegions.values()) {
+      for (List<RegionInfo> list: tableRegions.values()) {
          otherRegionsCount += list.size();
       }
       // now subtract known states

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
index 6b403b2..c9bfcc9 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
@@ -26,7 +26,7 @@ String bcv = "";
 <%import>
 java.util.*;
 org.apache.hadoop.hbase.regionserver.HRegionServer;
-org.apache.hadoop.hbase.HRegionInfo;
+org.apache.hadoop.hbase.client.RegionInfo;
 org.apache.hadoop.hbase.ServerName;
 org.apache.hadoop.hbase.HBaseConfiguration;
 org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -44,7 +44,7 @@ org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 <%java>
   ServerInfo serverInfo = ProtobufUtil.getServerInfo(null, regionServer.getRSRpcServices());
   ServerName serverName = ProtobufUtil.toServerName(serverInfo.getServerName());
-  List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(regionServer.getRSRpcServices());
+  List<RegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(regionServer.getRSRpcServices());
   MasterAddressTracker masterAddressTracker = regionServer.getMasterAddressTracker();
   ServerName masterServerName = masterAddressTracker == null ? null
     : masterAddressTracker.getMasterAddress();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index 285dbe6..cf0e8ad 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -18,14 +18,15 @@
 </%doc>
 <%args>
         HRegionServer regionServer;
-        List<HRegionInfo> onlineRegions;
+        List<RegionInfo> onlineRegions;
 </%args>
 <%import>
         java.util.*;
         org.apache.commons.lang3.time.FastDateFormat;
         org.apache.hadoop.hbase.regionserver.HRegionServer;
         org.apache.hadoop.hbase.util.Bytes;
-        org.apache.hadoop.hbase.HRegionInfo;
+        org.apache.hadoop.hbase.client.RegionInfo;
+        org.apache.hadoop.hbase.client.RegionInfoDisplay;
         org.apache.hadoop.hbase.regionserver.Region;
         org.apache.hadoop.hbase.ServerName;
         org.apache.hadoop.hbase.HBaseConfiguration;
@@ -39,7 +40,7 @@
 <%if (onlineRegions != null && onlineRegions.size() > 0) %>
 
     <%java>
-        Collections.sort(onlineRegions);
+        Collections.sort(onlineRegions, RegionInfo.COMPARATOR);
     </%java>
 
     <div class="tabbable">
@@ -85,7 +86,7 @@
 
 <%def baseInfo>
 <%args>
-    List<HRegionInfo> onlineRegions;
+    List<RegionInfo> onlineRegions;
 </%args>
     <table class="table table-striped">
         <tr>
@@ -95,16 +96,16 @@
             <th>ReplicaID</th>
         </tr>
 
-        <%for HRegionInfo r: onlineRegions %>
+        <%for RegionInfo r: onlineRegions %>
         <tr>
             <%java>
-             String displayName = HRegionInfo.getRegionNameAsStringForDisplay(r,
+             String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r,
                regionServer.getConfiguration());
             </%java>
             <td><a href="region.jsp?name=<% r.getEncodedName() %>"><% displayName %></a></td>
-            <td><% Bytes.toStringBinary(HRegionInfo.getStartKeyForDisplay(r,
+            <td><% Bytes.toStringBinary(RegionInfoDisplay.getStartKeyForDisplay(r,
                                         regionServer.getConfiguration())) %></td>
-            <td><% Bytes.toStringBinary(HRegionInfo.getEndKeyForDisplay(r,
+            <td><% Bytes.toStringBinary(RegionInfoDisplay.getEndKeyForDisplay(r,
                                         regionServer.getConfiguration())) %></td>
             <td><% r.getReplicaId() %></td>
         </tr>
@@ -114,7 +115,7 @@
 
 <%def requestStats>
 <%args>
-    List<HRegionInfo> onlineRegions;
+    List<RegionInfo> onlineRegions;
 </%args>
     <table class="table table-striped">
         <tr>
@@ -124,12 +125,12 @@
             <th>Write Request Count</th>
         </tr>
 
-        <%for HRegionInfo r: onlineRegions %>
+        <%for RegionInfo r: onlineRegions %>
 
         <tr>
         <%java>
             RegionLoad load = regionServer.createRegionLoad(r.getEncodedName());
-            String displayName = HRegionInfo.getRegionNameAsStringForDisplay(r,
+            String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r,
               regionServer.getConfiguration());
         </%java>
             <td><a href="region.jsp?name=<% r.getEncodedName() %>"><% displayName %></a></td>
@@ -146,7 +147,7 @@
 
 <%def storeStats>
 <%args>
-    List<HRegionInfo> onlineRegions;
+    List<RegionInfo> onlineRegions;
 </%args>
     <table class="table table-striped">
         <tr>
@@ -160,12 +161,12 @@
             <th>Data Locality</th>
         </tr>
 
-        <%for HRegionInfo r: onlineRegions %>
+        <%for RegionInfo r: onlineRegions %>
 
         <tr>
         <%java>
             RegionLoad load = regionServer.createRegionLoad(r.getEncodedName());
-            String displayName = HRegionInfo.getRegionNameAsStringForDisplay(r,
+            String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r,
               regionServer.getConfiguration());
         </%java>
             <td><a href="region.jsp?name=<% r.getEncodedName() %>"><% displayName %></a></td>
@@ -190,7 +191,7 @@
 
 <%def compactStats>
 <%args>
-    List<HRegionInfo> onlineRegions;
+    List<RegionInfo> onlineRegions;
 </%args>
     <table class="table table-striped">
         <tr>
@@ -201,7 +202,7 @@
             <th>Last Major Compaction</th>
         </tr>
 
-        <%for HRegionInfo r: onlineRegions %>
+        <%for RegionInfo r: onlineRegions %>
 
         <tr>
         <%java>
@@ -218,7 +219,7 @@
                 compactTime = fdf.format(load.getLastMajorCompactionTs());
               }
             }
-            String displayName = HRegionInfo.getRegionNameAsStringForDisplay(r,
+            String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r,
               regionServer.getConfiguration());
         </%java>
             <td><a href="region.jsp?name=<% r.getEncodedName() %>"><% displayName %></a></td>
@@ -235,7 +236,7 @@
 
 <%def memstoreStats>
 <%args>
-    List<HRegionInfo> onlineRegions;
+    List<RegionInfo> onlineRegions;
 </%args>
     <table class="table table-striped">
         <tr>
@@ -243,12 +244,12 @@
             <th>Memstore Size</th>
         </tr>
 
-        <%for HRegionInfo r: onlineRegions %>
+        <%for RegionInfo r: onlineRegions %>
 
         <tr>
         <%java>
             RegionLoad load = regionServer.createRegionLoad(r.getEncodedName());
-            String displayName = HRegionInfo.getRegionNameAsStringForDisplay(r,
+            String displayName = RegionInfoDisplay.getRegionNameAsStringForDisplay(r,
               regionServer.getConfiguration());
         </%java>
             <td><a href="region.jsp?name=<% r.getEncodedName() %>"><% displayName %></a></td>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
index 43685ae..e57471a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -31,22 +32,22 @@ public interface RegionStateListener {
   /**
    * Process region split event.
    *
-   * @param hri An instance of HRegionInfo
+   * @param hri An instance of RegionInfo
    * @throws IOException
    */
-  void onRegionSplit(HRegionInfo hri) throws IOException;
+  void onRegionSplit(RegionInfo hri) throws IOException;
 
   /**
    * Process region split reverted event.
    *
-   * @param hri An instance of HRegionInfo
+   * @param hri An instance of RegionInfo
    * @throws IOException Signals that an I/O exception has occurred.
    */
-  void onRegionSplitReverted(HRegionInfo hri) throws IOException;
+  void onRegionSplitReverted(RegionInfo hri) throws IOException;
 
   /**
    * Process region merge event.
    * @throws IOException
    */
-  void onRegionMerged(HRegionInfo mergedRegion) throws IOException;
+  void onRegionMerged(RegionInfo mergedRegion) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index 4321dc8..4da1235 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -75,7 +75,7 @@ public class HFileArchiver {
   /**
    * @return True if the Region exits in the filesystem.
    */
-  public static boolean exists(Configuration conf, FileSystem fs, HRegionInfo info)
+  public static boolean exists(Configuration conf, FileSystem fs, RegionInfo info)
       throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
     Path regionDir = HRegion.getRegionDir(rootDir, info);
@@ -87,10 +87,10 @@ public class HFileArchiver {
    * archive directory
    * @param conf the configuration to use
    * @param fs the file system object
-   * @param info HRegionInfo for region to be deleted
+   * @param info RegionInfo for region to be deleted
    * @throws IOException
    */
-  public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
+  public static void archiveRegion(Configuration conf, FileSystem fs, RegionInfo info)
       throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
     archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()),
@@ -176,7 +176,7 @@ public class HFileArchiver {
    * @throws IOException if the files could not be correctly disposed.
    */
   public static void archiveFamily(FileSystem fs, Configuration conf,
-      HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
+      RegionInfo parent, Path tableDir, byte[] family) throws IOException {
     Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
     archiveFamilyByFamilyDir(fs, conf, parent, familyDir, family);
   }
@@ -192,7 +192,7 @@ public class HFileArchiver {
    * @throws IOException if the files could not be correctly disposed.
    */
   public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf,
-      HRegionInfo parent, Path familyDir, byte[] family) throws IOException {
+      RegionInfo parent, Path familyDir, byte[] family) throws IOException {
     FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
     if (storeFiles == null) {
       LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
@@ -219,13 +219,13 @@ public class HFileArchiver {
    * Remove the store files, either by archiving them or outright deletion
    * @param conf {@link Configuration} to examine to determine the archive directory
    * @param fs the filesystem where the store files live
-   * @param regionInfo {@link HRegionInfo} of the region hosting the store files
+   * @param regionInfo {@link RegionInfo} of the region hosting the store files
    * @param family the family hosting the store files
    * @param compactedFiles files to be disposed of. No further reading of these files should be
    *          attempted; otherwise likely to cause an {@link IOException}
    * @throws IOException if the files could not be correctly disposed.
    */
-  public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
+  public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionInfo regionInfo,
       Path tableDir, byte[] family, Collection<HStoreFile> compactedFiles)
       throws IOException, FailedArchiveException {
 
@@ -284,7 +284,7 @@ public class HFileArchiver {
    * @param storeFile file to be archived
    * @throws IOException if the files could not be correctly disposed.
    */
-  public static void archiveStoreFile(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
+  public static void archiveStoreFile(Configuration conf, FileSystem fs, RegionInfo regionInfo,
       Path tableDir, byte[] family, Path storeFile) throws IOException {
     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
     // make sure we don't archive if we can't and that the archive dir exists

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index efdf8e5..72a2ea4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -29,11 +29,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A client scanner for a region opened for read-only on the client side. Assumes region data
@@ -49,7 +48,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
   List<Cell> values;
 
   public ClientSideRegionScanner(Configuration conf, FileSystem fs,
-      Path rootDir, TableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
+      Path rootDir, TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics)
           throws IOException {
     // region is immutable, set isolation level
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
index 9244ced..ab361c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
@@ -30,10 +30,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A Scanner which performs a scan over snapshot files. Using this class requires copying the
@@ -73,7 +72,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
   private Path rootDir;
   private Path restoreDir;
   private Scan scan;
-  private ArrayList<HRegionInfo> regions;
+  private ArrayList<RegionInfo> regions;
   private TableDescriptor htd;
 
   private ClientSideRegionScanner currentRegionScanner  = null;
@@ -121,11 +120,11 @@ public class TableSnapshotScanner extends AbstractClientScanner {
     final RestoreSnapshotHelper.RestoreMetaChanges meta =
       RestoreSnapshotHelper.copySnapshotForScanner(
         conf, fs, rootDir, restoreDir, snapshotName);
-    final List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();
+    final List<RegionInfo> restoredRegions = meta.getRegionsToAdd();
 
     htd = meta.getTableDescriptor();
     regions = new ArrayList<>(restoredRegions.size());
-    for (HRegionInfo hri : restoredRegions) {
+    for (RegionInfo hri : restoredRegions) {
       if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
         continue;
       }
@@ -136,7 +135,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
     }
 
     // sort for regions according to startKey.
-    Collections.sort(regions);
+    Collections.sort(regions, RegionInfo.COMPARATOR);
     initScanMetrics(scan);
   }
 
@@ -150,7 +149,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
           return null;
         }
 
-        HRegionInfo hri = regions.get(currentRegion);
+        RegionInfo hri = regions.get(currentRegion);
         currentRegionScanner = new ClientSideRegionScanner(conf, fs,
           restoreDir, htd, hri, scan, scanMetrics);
         if (this.scanMetrics != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java
index 2a77d4c..8694a4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java
@@ -19,22 +19,22 @@
 
 package org.apache.hadoop.hbase.client.locking;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.NonceGenerator;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
-import org.apache.hadoop.hbase.client.NonceGenerator;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType;
 
 /**
  * Helper class to create "master locks" for namespaces, tables and regions.
@@ -83,7 +83,7 @@ public class LockServiceClient {
    * Create a new EntityLock object to acquire exclusive lock on multiple regions of same tables.
    * Internally, the table and its namespace will also be locked in shared mode.
    */
-  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description, Abortable abort) {
+  public EntityLock regionLock(List<RegionInfo> regionInfos, String description, Abortable abort) {
     LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE,
         null, null, regionInfos, description, ng.getNonceGroup(), ng.newNonce());
     return new EntityLock(conf, stub, lockRequest, abort);
@@ -91,15 +91,15 @@ public class LockServiceClient {
 
   @VisibleForTesting
   public static LockRequest buildLockRequest(final LockType type,
-      final String namespace, final TableName tableName, final List<HRegionInfo> regionInfos,
+      final String namespace, final TableName tableName, final List<RegionInfo> regionInfos,
       final String description, final long nonceGroup, final long nonce) {
     final LockRequest.Builder builder = LockRequest.newBuilder()
       .setLockType(type)
       .setNonceGroup(nonceGroup)
       .setNonce(nonce);
     if (regionInfos != null) {
-      for (HRegionInfo hri: regionInfos) {
-        builder.addRegionInfo(HRegionInfo.convert(hri));
+      for (RegionInfo hri: regionInfos) {
+        builder.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
       }
     } else if (namespace != null) {
       builder.setNamespace(namespace);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
index 288721a..df8103b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
@@ -24,15 +24,15 @@ import java.io.InterruptedIOException;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 
 /**
  * Coordination for SplitLogManager. It creates and works with tasks for split log operations<BR>
@@ -123,7 +123,7 @@ public interface SplitLogManagerCoordination {
    * @throws IOException in case of failure
    * @throws InterruptedIOException
    */
-  void markRegionsRecovering(final ServerName serverName, Set<HRegionInfo> userRegions)
+  void markRegionsRecovering(final ServerName serverName, Set<RegionInfo> userRegions)
       throws IOException, InterruptedIOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index ef6ef62..5fd20e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.hbase.coordination;
 
-import static org.apache.hadoop.hbase.util.CollectionUtils.*;
 import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.CHECK;
 import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.FORCE;
 import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.DELETED;
 import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.FAILURE;
 import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.IN_PROGRESS;
 import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.SUCCESS;
+import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -39,17 +39,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.TaskFinisher.Status;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
 import org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WALSplitter;
@@ -59,6 +58,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.AsyncCallback;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
@@ -66,6 +66,8 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.data.Stat;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
+
 /**
  * ZooKeeper based implementation of
  * {@link SplitLogManagerCoordination}
@@ -287,7 +289,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
   public void removeRecoveringRegions(final Set<String> recoveredServerNameSet,
       Boolean isMetaRecovery)
   throws IOException {
-    final String metaEncodeRegionName = HRegionInfo.FIRST_META_REGIONINFO.getEncodedName();
+    final String metaEncodeRegionName = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName();
     int count = 0;
     try {
       List<String> tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode);
@@ -594,10 +596,10 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
   * @param userRegions user regions assigned on the region server
    */
   @Override
-  public void markRegionsRecovering(final ServerName serverName, Set<HRegionInfo> userRegions)
+  public void markRegionsRecovering(final ServerName serverName, Set<RegionInfo> userRegions)
       throws IOException, InterruptedIOException {
     this.lastRecoveringNodeCreationTime = EnvironmentEdgeManager.currentTime();
-    for (HRegionInfo region : userRegions) {
+    for (RegionInfo region : userRegions) {
       String regionEncodeName = region.getEncodedName();
       long retries = this.zkretries;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
index c4fb440..fbb4101 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
@@ -24,22 +24,22 @@ import java.util.Optional;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.WrongRegionException;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.WrongRegionException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
 
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
@@ -92,7 +92,7 @@ public class MultiRowMutationEndpoint extends MultiRowMutationService implements
         mutations.add(ProtobufUtil.toMutation(m));
       }
 
-      HRegionInfo regionInfo = env.getRegion().getRegionInfo();
+      RegionInfo regionInfo = env.getRegion().getRegionInfo();
       for (Mutation m : mutations) {
         // check whether rows are in range for this region
         if (!HRegion.rowIsInRange(regionInfo, m.getRow())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index ef5c650..f0afad9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -37,29 +37,29 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.RackManager;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.FavoredNodes;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.FavoredNodes;
 
 /**
  * Helper class for {@link FavoredNodeLoadBalancer} that has all the intelligence for racks,
  * meta scans, etc. Instantiated by the {@link FavoredNodeLoadBalancer} when needed (from
- * within calls like {@link FavoredNodeLoadBalancer#randomAssignment(HRegionInfo, List)}).
+ * within calls like {@link FavoredNodeLoadBalancer#randomAssignment(RegionInfo, List)}).
  * All updates to favored nodes should only be done from {@link FavoredNodesManager} and not
  * through this helper class (except for tests).
  */
@@ -116,15 +116,15 @@ public class FavoredNodeAssignmentHelper {
 
   /**
    * Update meta table with favored nodes info
-   * @param regionToFavoredNodes map of HRegionInfo's to their favored nodes
+   * @param regionToFavoredNodes map of RegionInfo's to their favored nodes
    * @param connection connection to be used
    * @throws IOException
    */
   public static void updateMetaWithFavoredNodesInfo(
-      Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
+      Map<RegionInfo, List<ServerName>> regionToFavoredNodes,
       Connection connection) throws IOException {
     List<Put> puts = new ArrayList<>();
-    for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
+    for (Map.Entry<RegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
       Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
       if (put != null) {
         puts.add(put);
@@ -141,10 +141,10 @@ public class FavoredNodeAssignmentHelper {
    * @throws IOException
    */
   public static void updateMetaWithFavoredNodesInfo(
-      Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
+      Map<RegionInfo, List<ServerName>> regionToFavoredNodes,
       Configuration conf) throws IOException {
     List<Put> puts = new ArrayList<>();
-    for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
+    for (Map.Entry<RegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
       Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
       if (put != null) {
         puts.add(put);
@@ -170,7 +170,7 @@ public class FavoredNodeAssignmentHelper {
    * @param favoredNodeList
    * @return Put object
    */
-  static Put makePutFromRegionInfo(HRegionInfo regionInfo, List<ServerName>favoredNodeList)
+  static Put makePutFromRegionInfo(RegionInfo regionInfo, List<ServerName>favoredNodeList)
   throws IOException {
     Put put = null;
     if (favoredNodeList != null) {
@@ -225,8 +225,8 @@ public class FavoredNodeAssignmentHelper {
   // If there were fewer servers in one rack, say r3, which had 3 servers, one possible
   // placement could be r2:s5, <skip-r3>, r4:s5, r1:s5, r2:s6, <skip-r3> ...
  // The regions should be distributed proportionately to the rack sizes
-  public void placePrimaryRSAsRoundRobin(Map<ServerName, List<HRegionInfo>> assignmentMap,
-      Map<HRegionInfo, ServerName> primaryRSMap, List<HRegionInfo> regions) {
+  public void placePrimaryRSAsRoundRobin(Map<ServerName, List<RegionInfo>> assignmentMap,
+      Map<RegionInfo, ServerName> primaryRSMap, List<RegionInfo> regions) {
     List<String> rackList = new ArrayList<>(rackToRegionServerMap.size());
     rackList.addAll(rackToRegionServerMap.keySet());
     int rackIndex = random.nextInt(rackList.size());
@@ -239,7 +239,7 @@ public class FavoredNodeAssignmentHelper {
     int numIterations = 0;
     // Initialize the current processing host index.
     int serverIndex = random.nextInt(maxRackSize);
-    for (HRegionInfo regionInfo : regions) {
+    for (RegionInfo regionInfo : regions) {
       List<ServerName> currentServerList;
       String rackName;
       while (true) {
@@ -264,7 +264,7 @@ public class FavoredNodeAssignmentHelper {
       // Place the current region with the current primary region server
       primaryRSMap.put(regionInfo, currentServer);
       if (assignmentMap != null) {
-        List<HRegionInfo> regionsForServer = assignmentMap.get(currentServer);
+        List<RegionInfo> regionsForServer = assignmentMap.get(currentServer);
         if (regionsForServer == null) {
           regionsForServer = new ArrayList<>();
           assignmentMap.put(currentServer, regionsForServer);
@@ -282,12 +282,12 @@ public class FavoredNodeAssignmentHelper {
     }
   }
 
-  public Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryRS(
-      Map<HRegionInfo, ServerName> primaryRSMap) {
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
-    for (Map.Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
+  public Map<RegionInfo, ServerName[]> placeSecondaryAndTertiaryRS(
+      Map<RegionInfo, ServerName> primaryRSMap) {
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
+    for (Map.Entry<RegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
       // Get the target region and its primary region server rack
-      HRegionInfo regionInfo = entry.getKey();
+      RegionInfo regionInfo = entry.getKey();
       ServerName primaryRS = entry.getValue();
       try {
         // Create the secondary and tertiary region server pair object.
@@ -306,7 +306,7 @@ public class FavoredNodeAssignmentHelper {
     return secondaryAndTertiaryMap;
   }
 
-  public ServerName[] getSecondaryAndTertiary(HRegionInfo regionInfo, ServerName primaryRS)
+  public ServerName[] getSecondaryAndTertiary(RegionInfo regionInfo, ServerName primaryRS)
       throws IOException {
 
     ServerName[] favoredNodes;// Get the rack for the primary region server
@@ -320,11 +320,11 @@ public class FavoredNodeAssignmentHelper {
     return favoredNodes;
   }
 
-  private Map<ServerName, Set<HRegionInfo>> mapRSToPrimaries(
-      Map<HRegionInfo, ServerName> primaryRSMap) {
-    Map<ServerName, Set<HRegionInfo>> primaryServerMap = new HashMap<>();
-    for (Entry<HRegionInfo, ServerName> e : primaryRSMap.entrySet()) {
-      Set<HRegionInfo> currentSet = primaryServerMap.get(e.getValue());
+  private Map<ServerName, Set<RegionInfo>> mapRSToPrimaries(
+      Map<RegionInfo, ServerName> primaryRSMap) {
+    Map<ServerName, Set<RegionInfo>> primaryServerMap = new HashMap<>();
+    for (Entry<RegionInfo, ServerName> e : primaryRSMap.entrySet()) {
+      Set<RegionInfo> currentSet = primaryServerMap.get(e.getValue());
       if (currentSet == null) {
         currentSet = new HashSet<>();
       }
@@ -341,15 +341,15 @@ public class FavoredNodeAssignmentHelper {
    * @param primaryRSMap
    * @return the map of regions to the servers the region-files should be hosted on
    */
-  public Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions(
-      Map<HRegionInfo, ServerName> primaryRSMap) {
-    Map<ServerName, Set<HRegionInfo>> serverToPrimaries =
+  public Map<RegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions(
+      Map<RegionInfo, ServerName> primaryRSMap) {
+    Map<ServerName, Set<RegionInfo>> serverToPrimaries =
         mapRSToPrimaries(primaryRSMap);
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
 
-    for (Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
+    for (Entry<RegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
       // Get the target region and its primary region server rack
-      HRegionInfo regionInfo = entry.getKey();
+      RegionInfo regionInfo = entry.getKey();
       ServerName primaryRS = entry.getValue();
       try {
         // Get the rack for the primary region server
@@ -378,9 +378,9 @@ public class FavoredNodeAssignmentHelper {
   }
 
   private ServerName[] multiRackCaseWithRestrictions(
-      Map<ServerName, Set<HRegionInfo>> serverToPrimaries,
-      Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap,
-      String primaryRack, ServerName primaryRS, HRegionInfo regionInfo) throws IOException {
+      Map<ServerName, Set<RegionInfo>> serverToPrimaries,
+      Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap,
+      String primaryRack, ServerName primaryRS, RegionInfo regionInfo) throws IOException {
     // Random to choose the secondary and tertiary region server
     // from another rack to place the secondary and tertiary
     // Random to choose one rack except for the current rack
@@ -395,13 +395,13 @@ public class FavoredNodeAssignmentHelper {
       // Randomly pick up two servers from this secondary rack
       // Skip the secondary for the tertiary placement
       // skip the servers which share the primary already
-      Set<HRegionInfo> primaries = serverToPrimaries.get(primaryRS);
+      Set<RegionInfo> primaries = serverToPrimaries.get(primaryRS);
       Set<ServerName> skipServerSet = new HashSet<>();
       while (true) {
         ServerName[] secondaryAndTertiary = null;
         if (primaries.size() > 1) {
          // check where its tertiary and secondary are
-          for (HRegionInfo primary : primaries) {
+          for (RegionInfo primary : primaries) {
             secondaryAndTertiary = secondaryAndTertiaryMap.get(primary);
             if (secondaryAndTertiary != null) {
               if (getRackOfServer(secondaryAndTertiary[0]).equals(secondaryRack)) {
@@ -468,7 +468,7 @@ public class FavoredNodeAssignmentHelper {
     return favoredNodes;
   }
 
-  private ServerName[] singleRackCase(HRegionInfo regionInfo,
+  private ServerName[] singleRackCase(RegionInfo regionInfo,
       ServerName primaryRS,
       String primaryRack) throws IOException {
     // Single rack case: have to pick the secondary and tertiary
@@ -516,7 +516,7 @@ public class FavoredNodeAssignmentHelper {
    * @return Array containing secondary and tertiary favored nodes.
    * @throws IOException Signals that an I/O exception has occurred.
    */
-  private ServerName[] multiRackCase(HRegionInfo regionInfo, ServerName primaryRS,
+  private ServerName[] multiRackCase(RegionInfo regionInfo, ServerName primaryRS,
       String primaryRack) throws IOException {
 
     List<ServerName>favoredNodes = Lists.newArrayList(primaryRS);
@@ -764,15 +764,15 @@ public class FavoredNodeAssignmentHelper {
   * Choose a random server as primary and then choose secondary and tertiary FN so it's spread
    * across two racks.
    */
-  public List<ServerName> generateFavoredNodes(HRegionInfo hri) throws IOException {
+  public List<ServerName> generateFavoredNodes(RegionInfo hri) throws IOException {
 
     List<ServerName> favoredNodesForRegion = new ArrayList<>(FAVORED_NODES_NUM);
     ServerName primary = servers.get(random.nextInt(servers.size()));
     favoredNodesForRegion.add(ServerName.valueOf(primary.getHostAndPort(), ServerName.NON_STARTCODE));
 
-    Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<>(1);
+    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>(1);
     primaryRSMap.put(hri, primary);
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryRSMap =
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryRSMap =
         placeSecondaryAndTertiaryRS(primaryRSMap);
     ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(hri);
     if (secondaryAndTertiaryNodes != null && secondaryAndTertiaryNodes.length == 2) {
@@ -785,13 +785,13 @@ public class FavoredNodeAssignmentHelper {
     }
   }
 
-  public Map<HRegionInfo, List<ServerName>> generateFavoredNodesRoundRobin(
-      Map<ServerName, List<HRegionInfo>> assignmentMap, List<HRegionInfo> regions)
+  public Map<RegionInfo, List<ServerName>> generateFavoredNodesRoundRobin(
+      Map<ServerName, List<RegionInfo>> assignmentMap, List<RegionInfo> regions)
       throws IOException {
 
     if (regions.size() > 0) {
       if (canPlaceFavoredNodes()) {
-        Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<>();
+        Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>();
         // Lets try to have an equal distribution for primary favored node
         placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
         return generateFavoredNodes(primaryRSMap);
@@ -806,16 +806,16 @@ public class FavoredNodeAssignmentHelper {
   /*
    * Generate favored nodes for a set of regions when we know where they are currently hosted.
    */
-  private Map<HRegionInfo, List<ServerName>> generateFavoredNodes(
-      Map<HRegionInfo, ServerName> primaryRSMap) {
+  private Map<RegionInfo, List<ServerName>> generateFavoredNodes(
+      Map<RegionInfo, ServerName> primaryRSMap) {
 
-    Map<HRegionInfo, List<ServerName>> generatedFavNodes = new HashMap<>();
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryRSMap =
+    Map<RegionInfo, List<ServerName>> generatedFavNodes = new HashMap<>();
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryRSMap =
       placeSecondaryAndTertiaryRS(primaryRSMap);
 
-    for (Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
+    for (Entry<RegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
       List<ServerName> favoredNodesForRegion = new ArrayList<>(FAVORED_NODES_NUM);
-      HRegionInfo region = entry.getKey();
+      RegionInfo region = entry.getKey();
       ServerName primarySN = entry.getValue();
       favoredNodesForRegion.add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(),
         NON_STARTCODE));