Posted to commits@hbase.apache.org by ch...@apache.org on 2017/09/28 12:30:43 UTC

[13/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 9e37292..da6afc9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -30,15 +30,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
@@ -59,13 +59,16 @@ import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * The procedure to Merge a region in a table.
@@ -79,8 +82,8 @@ public class MergeTableRegionsProcedure
   private Boolean traceEnabled;
   private volatile boolean lock = false;
   private ServerName regionLocation;
-  private HRegionInfo[] regionsToMerge;
-  private HRegionInfo mergedRegion;
+  private RegionInfo[] regionsToMerge;
+  private RegionInfo mergedRegion;
   private boolean forcible;
 
   public MergeTableRegionsProcedure() {
@@ -88,18 +91,18 @@ public class MergeTableRegionsProcedure
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-      final HRegionInfo regionToMergeA, final HRegionInfo regionToMergeB) throws IOException {
+      final RegionInfo regionToMergeA, final RegionInfo regionToMergeB) throws IOException {
     this(env, regionToMergeA, regionToMergeB, false);
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-      final HRegionInfo regionToMergeA, final HRegionInfo regionToMergeB,
+      final RegionInfo regionToMergeA, final RegionInfo regionToMergeB,
       final boolean forcible) throws MergeRegionException {
-    this(env, new HRegionInfo[] {regionToMergeA, regionToMergeB}, forcible);
+    this(env, new RegionInfo[] {regionToMergeA, regionToMergeB}, forcible);
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-      final HRegionInfo[] regionsToMerge, final boolean forcible)
+      final RegionInfo[] regionsToMerge, final boolean forcible)
       throws MergeRegionException {
     super(env);
 
@@ -117,7 +120,7 @@ public class MergeTableRegionsProcedure
     this.forcible = forcible;
   }
 
-  private static void checkRegionsToMerge(final HRegionInfo[] regionsToMerge,
+  private static void checkRegionsToMerge(final RegionInfo[] regionsToMerge,
       final boolean forcible) throws MergeRegionException {
     // For now, we only merge 2 regions.
     // It could be extended to more than 2 regions in the future.
@@ -129,19 +132,19 @@ public class MergeTableRegionsProcedure
     checkRegionsToMerge(regionsToMerge[0], regionsToMerge[1], forcible);
   }
 
-  private static void checkRegionsToMerge(final HRegionInfo regionToMergeA,
-      final HRegionInfo regionToMergeB, final boolean forcible) throws MergeRegionException {
+  private static void checkRegionsToMerge(final RegionInfo regionToMergeA,
+      final RegionInfo regionToMergeB, final boolean forcible) throws MergeRegionException {
     if (!regionToMergeA.getTable().equals(regionToMergeB.getTable())) {
       throw new MergeRegionException("Can't merge regions from two different tables: " +
         regionToMergeA + ", " + regionToMergeB);
     }
 
-    if (regionToMergeA.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
-        regionToMergeB.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+    if (regionToMergeA.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID ||
+        regionToMergeB.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
       throw new MergeRegionException("Can't merge non-default replicas");
     }
 
-    if (!HRegionInfo.areAdjacent(regionToMergeA, regionToMergeB)) {
+    if (!RegionInfo.areAdjacent(regionToMergeA, regionToMergeB)) {
       String msg = "Unable to merge not adjacent regions " + regionToMergeA.getShortNameToLog() +
           ", " + regionToMergeB.getShortNameToLog() + " where forcible = " + forcible;
       LOG.warn(msg);
@@ -151,18 +154,18 @@ public class MergeTableRegionsProcedure
     }
   }
 
-  private static HRegionInfo createMergedRegionInfo(final HRegionInfo[] regionsToMerge) {
+  private static RegionInfo createMergedRegionInfo(final RegionInfo[] regionsToMerge) {
     return createMergedRegionInfo(regionsToMerge[0], regionsToMerge[1]);
   }
 
   /**
    * Create merged region info through the specified two regions
    */
-  private static HRegionInfo createMergedRegionInfo(final HRegionInfo regionToMergeA,
-      final HRegionInfo regionToMergeB) {
+  private static RegionInfo createMergedRegionInfo(final RegionInfo regionToMergeA,
+      final RegionInfo regionToMergeB) {
     // Choose the smaller as start key
     final byte[] startKey;
-    if (regionToMergeA.compareTo(regionToMergeB) <= 0) {
+    if (RegionInfo.COMPARATOR.compare(regionToMergeA, regionToMergeB) <= 0) {
       startKey = regionToMergeA.getStartKey();
     } else {
       startKey = regionToMergeB.getStartKey();
@@ -179,12 +182,16 @@ public class MergeTableRegionsProcedure
     }
 
     // Merged region is sorted between two merging regions in META
-    final long rid = getMergedRegionIdTimestamp(regionToMergeA, regionToMergeB);
-    return new HRegionInfo(regionToMergeA.getTable(), startKey, endKey, false, rid);
+    return RegionInfoBuilder.newBuilder(regionToMergeA.getTable())
+        .setStartKey(startKey)
+        .setEndKey(endKey)
+        .setSplit(false)
+        .setRegionId(getMergedRegionIdTimestamp(regionToMergeA, regionToMergeB))
+        .build();
   }
 
-  private static long getMergedRegionIdTimestamp(final HRegionInfo regionToMergeA,
-      final HRegionInfo regionToMergeB) {
+  private static long getMergedRegionIdTimestamp(final RegionInfo regionToMergeA,
+      final RegionInfo regionToMergeB) {
     long rid = EnvironmentEdgeManager.currentTime();
     // Regionid is timestamp. Merged region's id can't be less than that of
     // merging regions else will insert at wrong location in hbase:meta (See HBASE-710).
@@ -252,7 +259,7 @@ public class MergeTableRegionsProcedure
         throw new UnsupportedOperationException(this + " unhandled state=" + state);
       }
     } catch (IOException e) {
-      LOG.warn("Error trying to merge regions " + HRegionInfo.getShortNameToLog(regionsToMerge) +
+      LOG.warn("Error trying to merge regions " + RegionInfo.getShortNameToLog(regionsToMerge) +
         " in the table " + getTableName() + " (in state=" + state + ")", e);
 
       setFailure("master-merge-regions", e);
@@ -305,7 +312,7 @@ public class MergeTableRegionsProcedure
       // This will be retried. Unless there is a bug in the code,
       // this should be just a "temporary error" (e.g. network down)
       LOG.warn("Failed rollback attempt step " + state + " for merging the regions "
-          + HRegionInfo.getShortNameToLog(regionsToMerge) + " in table " + getTableName(), e);
+          + RegionInfo.getShortNameToLog(regionsToMerge) + " in table " + getTableName(), e);
       throw e;
     }
   }
@@ -351,10 +358,10 @@ public class MergeTableRegionsProcedure
     final MasterProcedureProtos.MergeTableRegionsStateData.Builder mergeTableRegionsMsg =
         MasterProcedureProtos.MergeTableRegionsStateData.newBuilder()
         .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
-        .setMergedRegionInfo(HRegionInfo.convert(mergedRegion))
+        .setMergedRegionInfo(ProtobufUtil.toRegionInfo(mergedRegion))
         .setForcible(forcible);
     for (int i = 0; i < regionsToMerge.length; ++i) {
-      mergeTableRegionsMsg.addRegionInfo(HRegionInfo.convert(regionsToMerge[i]));
+      mergeTableRegionsMsg.addRegionInfo(ProtobufUtil.toRegionInfo(regionsToMerge[i]));
     }
     serializer.serialize(mergeTableRegionsMsg.build());
   }
@@ -369,12 +376,12 @@ public class MergeTableRegionsProcedure
     setUser(MasterProcedureUtil.toUserInfo(mergeTableRegionsMsg.getUserInfo()));
 
     assert(mergeTableRegionsMsg.getRegionInfoCount() == 2);
-    regionsToMerge = new HRegionInfo[mergeTableRegionsMsg.getRegionInfoCount()];
+    regionsToMerge = new RegionInfo[mergeTableRegionsMsg.getRegionInfoCount()];
     for (int i = 0; i < regionsToMerge.length; i++) {
-      regionsToMerge[i] = HRegionInfo.convert(mergeTableRegionsMsg.getRegionInfo(i));
+      regionsToMerge[i] = ProtobufUtil.toRegionInfo(mergeTableRegionsMsg.getRegionInfo(i));
     }
 
-    mergedRegion = HRegionInfo.convert(mergeTableRegionsMsg.getMergedRegionInfo());
+    mergedRegion = ProtobufUtil.toRegionInfo(mergeTableRegionsMsg.getMergedRegionInfo());
   }
 
   @Override
@@ -383,7 +390,7 @@ public class MergeTableRegionsProcedure
     sb.append(" table=");
     sb.append(getTableName());
     sb.append(", regions=");
-    sb.append(HRegionInfo.getShortNameToLog(regionsToMerge));
+    sb.append(RegionInfo.getShortNameToLog(regionsToMerge));
     sb.append(", forcibly=");
     sb.append(forcible);
   }
@@ -450,7 +457,7 @@ public class MergeTableRegionsProcedure
     boolean regionAHasMergeQualifier = !catalogJanitor.cleanMergeQualifier(regionsToMerge[0]);
     if (regionAHasMergeQualifier
         || !catalogJanitor.cleanMergeQualifier(regionsToMerge[1])) {
-      String msg = "Skip merging regions " + HRegionInfo.getShortNameToLog(regionsToMerge) +
+      String msg = "Skip merging regions " + RegionInfo.getShortNameToLog(regionsToMerge) +
         ", because region "
         + (regionAHasMergeQualifier ? regionsToMerge[0].getEncodedName() : regionsToMerge[1]
               .getEncodedName()) + " has merge qualifier";
@@ -526,7 +533,7 @@ public class MergeTableRegionsProcedure
       boolean ret = cpHost.preMergeRegionsAction(regionsToMerge, getUser());
       if (ret) {
         throw new IOException(
-          "Coprocessor bypassing regions " + HRegionInfo.getShortNameToLog(regionsToMerge) +
+          "Coprocessor bypassing regions " + RegionInfo.getShortNameToLog(regionsToMerge) +
           " merge.");
       }
     }
@@ -649,7 +656,7 @@ public class MergeTableRegionsProcedure
     int procsIdx = 0;
     for (int i = 0; i < regionsToMerge.length; ++i) {
       for (int j = 0; j < regionReplication; ++j) {
-        final HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(regionsToMerge[i], j);
+        final RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(regionsToMerge[i], j);
         procs[procsIdx++] = env.getAssignmentManager().createAssignProcedure(hri, serverName);
       }
     }
@@ -663,7 +670,7 @@ public class MergeTableRegionsProcedure
     int procsIdx = 0;
     for (int i = 0; i < regionsToMerge.length; ++i) {
       for (int j = 0; j < regionReplication; ++j) {
-        final HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(regionsToMerge[i], j);
+        final RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(regionsToMerge[i], j);
         procs[procsIdx++] = env.getAssignmentManager().createUnassignProcedure(hri,null,true);
       }
     }
@@ -675,7 +682,7 @@ public class MergeTableRegionsProcedure
     final ServerName targetServer = getServerName(env);
     final AssignProcedure[] procs = new AssignProcedure[regionReplication];
     for (int i = 0; i < procs.length; ++i) {
-      final HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(mergedRegion, i);
+      final RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(mergedRegion, i);
       procs[i] = env.getAssignmentManager().createAssignProcedure(hri, targetServer);
     }
     return procs;
@@ -699,12 +706,12 @@ public class MergeTableRegionsProcedure
 
       if (ret) {
         throw new IOException(
-          "Coprocessor bypassing regions " + HRegionInfo.getShortNameToLog(regionsToMerge) +
+          "Coprocessor bypassing regions " + RegionInfo.getShortNameToLog(regionsToMerge) +
           " merge.");
       }
       try {
         for (Mutation p : metaEntries) {
-          HRegionInfo.parseRegionName(p.getRow());
+          RegionInfo.parseRegionName(p.getRow());
         }
       } catch (IOException e) {
         LOG.error("Row key of mutation from coprocessor is not parsable as region name."
@@ -780,7 +787,7 @@ public class MergeTableRegionsProcedure
    * @return The merged region. Maybe be null if called to early or we failed.
    */
   @VisibleForTesting
-  public HRegionInfo getMergedRegion() {
+  public RegionInfo getMergedRegion() {
     return this.mergedRegion;
   }
 }

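The createMergedRegionInfo() hunk above shows the central API shift of HBASE-18839: the HRegionInfo constructor is replaced by an immutable RegionInfo produced through RegionInfoBuilder. Below is a minimal standalone sketch of that pattern, with an invented table name and keys (not taken from the patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class RegionInfoBuilderSketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("t1");   // hypothetical table
    byte[] startKey = Bytes.toBytes("aaa");      // hypothetical keys
    byte[] endKey = Bytes.toBytes("zzz");

    // Old style: new HRegionInfo(table, startKey, endKey, false, rid)
    // New style: fluent builder producing an immutable RegionInfo.
    RegionInfo merged = RegionInfoBuilder.newBuilder(table)
        .setStartKey(startKey)
        .setEndKey(endKey)
        .setSplit(false)
        .setRegionId(EnvironmentEdgeManager.currentTime())
        .build();

    System.out.println(merged.getRegionNameAsString());
  }
}
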
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
index b07298e..624806a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
@@ -23,14 +23,15 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData;
@@ -128,7 +129,7 @@ public class MoveRegionProcedure extends AbstractStateMachineRegionProcedure<Mov
     super.serializeStateData(serializer);
 
     final MoveRegionStateData.Builder state = MoveRegionStateData.newBuilder()
-        // No need to serialize the HRegionInfo. The super class has the region.
+        // No need to serialize the RegionInfo. The super class has the region.
         .setSourceServer(ProtobufUtil.toServerName(plan.getSource()));
     if (plan.getDestination() != null) {
       state.setDestinationServer(ProtobufUtil.toServerName(plan.getDestination()));
@@ -143,7 +144,7 @@ public class MoveRegionProcedure extends AbstractStateMachineRegionProcedure<Mov
     super.deserializeStateData(serializer);
 
     final MoveRegionStateData state = serializer.deserialize(MoveRegionStateData.class);
-    final HRegionInfo regionInfo = getRegion(); // Get it from super class deserialization.
+    final RegionInfo regionInfo = getRegion(); // Get it from super class deserialization.
     final ServerName sourceServer = ProtobufUtil.toServerName(state.getSourceServer());
     final ServerName destinationServer = state.hasDestinationServer() ?
         ProtobufUtil.toServerName(state.getDestinationServer()) : null;

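The serializeStateData()/deserializeStateData() hunks in this commit drop HRegionInfo.convert() in favour of the overloaded ProtobufUtil.toRegionInfo() helpers. A sketch of the round trip, assuming the shaded HBaseProtos.RegionInfo message type and an invented table name:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

public class RegionInfoConversionSketch {
  public static void main(String[] args) {
    RegionInfo original = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")).build();

    // Was: HBaseProtos.RegionInfo proto = HRegionInfo.convert(hri);
    HBaseProtos.RegionInfo proto = ProtobufUtil.toRegionInfo(original);

    // Was: HRegionInfo hri = HRegionInfo.convert(proto);
    RegionInfo restored = ProtobufUtil.toRegionInfo(proto);

    System.out.println(restored.getRegionNameAsString());
  }
}
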
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 1f43ad3..f9a1b43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -28,24 +28,24 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-import org.apache.hadoop.hbase.util.MultiHConnection;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.MultiHConnection;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
@@ -79,7 +79,7 @@ public class RegionStateStore {
   }
 
   public interface RegionStateVisitor {
-    void visitRegionState(HRegionInfo regionInfo, State state,
+    void visitRegionState(RegionInfo regionInfo, State state,
       ServerName regionLocation, ServerName lastHost, long openSeqNum);
   }
 
@@ -114,7 +114,7 @@ public class RegionStateStore {
       final HRegionLocation hrl = locations[i];
       if (hrl == null) continue;
 
-      final HRegionInfo regionInfo = hrl.getRegionInfo();
+      final RegionInfo regionInfo = hrl.getRegionInfo();
       if (regionInfo == null) continue;
 
       final int replicaId = regionInfo.getReplicaId();
@@ -132,7 +132,7 @@ public class RegionStateStore {
     }
   }
 
-  public void updateRegionLocation(final HRegionInfo regionInfo, final State state,
+  public void updateRegionLocation(final RegionInfo regionInfo, final State state,
       final ServerName regionLocation, final ServerName lastHost, final long openSeqNum,
       final long pid)
       throws IOException {
@@ -149,7 +149,7 @@ public class RegionStateStore {
         oldState != null ? oldState.getServerName() : null, openSeqNum, pid);
   }
 
-  protected void updateMetaLocation(final HRegionInfo regionInfo, final ServerName serverName)
+  protected void updateMetaLocation(final RegionInfo regionInfo, final ServerName serverName)
       throws IOException {
     try {
       MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName,
@@ -159,7 +159,7 @@ public class RegionStateStore {
     }
   }
 
-  protected void updateUserRegionLocation(final HRegionInfo regionInfo, final State state,
+  protected void updateUserRegionLocation(final RegionInfo regionInfo, final State state,
       final ServerName regionLocation, final ServerName lastHost, final long openSeqNum,
       final long pid)
       throws IOException {
@@ -195,7 +195,7 @@ public class RegionStateStore {
     }
   }
 
-  protected void updateRegionLocation(final HRegionInfo regionInfo, final State state,
+  protected void updateRegionLocation(final RegionInfo regionInfo, final State state,
       final Put... put) throws IOException {
     synchronized (this) {
       if (multiHConnection == null) {
@@ -219,8 +219,8 @@ public class RegionStateStore {
   // ============================================================================================
   //  Update Region Splitting State helpers
   // ============================================================================================
-  public void splitRegion(final HRegionInfo parent, final HRegionInfo hriA,
-      final HRegionInfo hriB, final ServerName serverName)  throws IOException {
+  public void splitRegion(final RegionInfo parent, final RegionInfo hriA,
+      final RegionInfo hriB, final ServerName serverName)  throws IOException {
     final TableDescriptor htd = getTableDescriptor(parent.getTable());
     MetaTableAccessor.splitRegion(master.getConnection(), parent, hriA, hriB, serverName,
         getRegionReplication(htd), hasSerialReplicationScope(htd));
@@ -229,8 +229,8 @@ public class RegionStateStore {
   // ============================================================================================
   //  Update Region Merging State helpers
   // ============================================================================================
-  public void mergeRegions(final HRegionInfo parent, final HRegionInfo hriA,
-      final HRegionInfo hriB, final ServerName serverName)  throws IOException {
+  public void mergeRegions(final RegionInfo parent, final RegionInfo hriA,
+      final RegionInfo hriB, final ServerName serverName)  throws IOException {
     final TableDescriptor htd = getTableDescriptor(parent.getTable());
     MetaTableAccessor.mergeRegions(master.getConnection(), parent, hriA, hriB, serverName,
         getRegionReplication(htd), EnvironmentEdgeManager.currentTime(),
@@ -240,11 +240,11 @@ public class RegionStateStore {
   // ============================================================================================
   //  Delete Region State helpers
   // ============================================================================================
-  public void deleteRegion(final HRegionInfo regionInfo) throws IOException {
+  public void deleteRegion(final RegionInfo regionInfo) throws IOException {
     deleteRegions(Collections.singletonList(regionInfo));
   }
 
-  public void deleteRegions(final List<HRegionInfo> regions) throws IOException {
+  public void deleteRegions(final List<RegionInfo> regions) throws IOException {
     MetaTableAccessor.deleteRegions(master.getConnection(), regions);
   }
 
@@ -300,7 +300,7 @@ public class RegionStateStore {
     return replicaId == 0
         ? HConstants.SERVERNAME_QUALIFIER
         : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
-          + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
+          + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
   }
 
   // ==========================================================================
@@ -322,6 +322,6 @@ public class RegionStateStore {
     return replicaId == 0
         ? HConstants.STATE_QUALIFIER
         : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
-          + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
+          + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
   }
 }

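The last two RegionStateStore hunks only swap the constant holder: the per-replica server and state column qualifiers in hbase:meta are now formatted with RegionInfo.REPLICA_ID_FORMAT instead of HRegionInfo.REPLICA_ID_FORMAT. A small sketch of how such a qualifier is built; the "_" delimiter stands in for the class's private META_REPLICA_ID_DELIMITER and is an assumption here:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicaColumnSketch {
  public static void main(String[] args) {
    int replicaId = 2;  // hypothetical non-default replica
    byte[] serverColumn = replicaId == 0
        ? HConstants.SERVERNAME_QUALIFIER
        : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + "_"
            + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
    // For replica 2 this yields a padded qualifier along the lines of "server_0002".
    System.out.println(Bytes.toString(serverColumn));
  }
}
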
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
index 3a9c34a..c13a49d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
@@ -19,21 +19,6 @@
 
 package org.apache.hadoop.hbase.master.assignment;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.yetus.audience.InterfaceAudience;
-
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -50,6 +35,22 @@ import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+
 /**
  * RegionStates contains a set of Maps that describes the in-memory state of the AM, with
  * the regions available in the system, the region in transition, the offline regions and
@@ -71,8 +72,8 @@ public class RegionStates {
     State.CLOSING                 // already in-progress (retrying)
   };
 
-  private static class AssignmentProcedureEvent extends ProcedureEvent<HRegionInfo> {
-    public AssignmentProcedureEvent(final HRegionInfo regionInfo) {
+  private static class AssignmentProcedureEvent extends ProcedureEvent<RegionInfo> {
+    public AssignmentProcedureEvent(final RegionInfo regionInfo) {
       super(regionInfo);
     }
   }
@@ -96,7 +97,7 @@ public class RegionStates {
   // so for now. Odd is that elsewhere in this RegionStates, we synchronize on
   // the RegionStateNode instance. TODO.
   public static class RegionStateNode implements Comparable<RegionStateNode> {
-    private final HRegionInfo regionInfo;
+    private final RegionInfo regionInfo;
     private final ProcedureEvent<?> event;
 
     private volatile RegionTransitionProcedure procedure = null;
@@ -117,7 +118,7 @@ public class RegionStates {
 
     private volatile long openSeqNum = HConstants.NO_SEQNUM;
 
-    public RegionStateNode(final HRegionInfo regionInfo) {
+    public RegionStateNode(final RegionInfo regionInfo) {
       this.regionInfo = regionInfo;
       this.event = new AssignmentProcedureEvent(regionInfo);
     }
@@ -184,7 +185,6 @@ public class RegionStates {
       this.openSeqNum = seqId;
     }
 
-    
     public ServerName setRegionLocation(final ServerName serverName) {
       ServerName lastRegionLocation = this.regionLocation;
       if (LOG.isTraceEnabled() && serverName == null) {
@@ -219,7 +219,7 @@ public class RegionStates {
       return event;
     }
 
-    public HRegionInfo getRegionInfo() {
+    public RegionInfo getRegionInfo() {
       return regionInfo;
     }
 
@@ -255,9 +255,9 @@ public class RegionStates {
 
     @Override
     public int compareTo(final RegionStateNode other) {
-      // NOTE: HRegionInfo sort by table first, so we are relying on that.
+      // NOTE: RegionInfo sort by table first, so we are relying on that.
       // we have a TestRegionState#testOrderedByTable() that check for that.
-      return getRegionInfo().compareTo(other.getRegionInfo());
+      return RegionInfo.COMPARATOR.compare(getRegionInfo(), other.getRegionInfo());
     }
 
     @Override
@@ -276,7 +276,7 @@ public class RegionStates {
     public String toString() {
       return toDescriptiveString();
     }
- 
+
     public String toShortString() {
       // rit= is the current Region-In-Transition State -- see State enum.
       return String.format("rit=%s, location=%s", getState(), getRegionLocation());
@@ -295,7 +295,7 @@ public class RegionStates {
     @Override
     public int compare(final RegionState l, final RegionState r) {
       int stampCmp = Long.compare(l.getStamp(), r.getStamp());
-      return stampCmp != 0 ? stampCmp : l.getRegion().compareTo(r.getRegion());
+      return stampCmp != 0 ? stampCmp : RegionInfo.COMPARATOR.compare(l.getRegion(), r.getRegion());
     }
   }
 
@@ -357,8 +357,8 @@ public class RegionStates {
       return regions.size();
     }
 
-    public ArrayList<HRegionInfo> getRegionInfoList() {
-      ArrayList<HRegionInfo> hris = new ArrayList<HRegionInfo>(regions.size());
+    public ArrayList<RegionInfo> getRegionInfoList() {
+      ArrayList<RegionInfo> hris = new ArrayList<RegionInfo>(regions.size());
       for (RegionStateNode region: regions) {
         hris.add(region.getRegionInfo());
       }
@@ -401,20 +401,20 @@ public class RegionStates {
 
   // TODO: Replace the ConcurrentSkipListMaps
   /**
-   * RegionName -- i.e. HRegionInfo.getRegionName() -- as bytes to {@link RegionStateNode}
+   * RegionName -- i.e. RegionInfo.getRegionName() -- as bytes to {@link RegionStateNode}
    */
   private final ConcurrentSkipListMap<byte[], RegionStateNode> regionsMap =
       new ConcurrentSkipListMap<byte[], RegionStateNode>(Bytes.BYTES_COMPARATOR);
 
-  private final ConcurrentSkipListMap<HRegionInfo, RegionStateNode> regionInTransition =
-    new ConcurrentSkipListMap<HRegionInfo, RegionStateNode>();
+  private final ConcurrentSkipListMap<RegionInfo, RegionStateNode> regionInTransition =
+    new ConcurrentSkipListMap<RegionInfo, RegionStateNode>(RegionInfo.COMPARATOR);
 
   /**
    * Regions marked as offline on a read of hbase:meta. Unused or at least, once
    * offlined, regions have no means of coming on line again. TODO.
    */
-  private final ConcurrentSkipListMap<HRegionInfo, RegionStateNode> regionOffline =
-    new ConcurrentSkipListMap<HRegionInfo, RegionStateNode>();
+  private final ConcurrentSkipListMap<RegionInfo, RegionStateNode> regionOffline =
+    new ConcurrentSkipListMap<RegionInfo, RegionStateNode>();
 
   private final ConcurrentSkipListMap<byte[], RegionFailedOpen> regionFailedOpen =
     new ConcurrentSkipListMap<byte[], RegionFailedOpen>(Bytes.BYTES_COMPARATOR);
@@ -432,7 +432,7 @@ public class RegionStates {
   }
 
   @VisibleForTesting
-  public boolean isRegionInRegionStates(final HRegionInfo hri) {
+  public boolean isRegionInRegionStates(final RegionInfo hri) {
     return (regionsMap.containsKey(hri.getRegionName()) || regionInTransition.containsKey(hri)
         || regionOffline.containsKey(hri));
   }
@@ -440,13 +440,13 @@ public class RegionStates {
   // ==========================================================================
   //  RegionStateNode helpers
   // ==========================================================================
-  protected RegionStateNode createRegionNode(final HRegionInfo regionInfo) {
+  protected RegionStateNode createRegionNode(final RegionInfo regionInfo) {
     RegionStateNode newNode = new RegionStateNode(regionInfo);
     RegionStateNode oldNode = regionsMap.putIfAbsent(regionInfo.getRegionName(), newNode);
     return oldNode != null ? oldNode : newNode;
   }
 
-  protected RegionStateNode getOrCreateRegionNode(final HRegionInfo regionInfo) {
+  protected RegionStateNode getOrCreateRegionNode(final RegionInfo regionInfo) {
     RegionStateNode node = regionsMap.get(regionInfo.getRegionName());
     return node != null ? node : createRegionNode(regionInfo);
   }
@@ -455,7 +455,7 @@ public class RegionStates {
     return regionsMap.get(regionName);
   }
 
-  protected RegionStateNode getRegionNode(final HRegionInfo regionInfo) {
+  protected RegionStateNode getRegionNode(final RegionInfo regionInfo) {
     return getRegionNodeFromName(regionInfo.getRegionName());
   }
 
@@ -469,7 +469,7 @@ public class RegionStates {
     return null;
   }
 
-  public void deleteRegion(final HRegionInfo regionInfo) {
+  public void deleteRegion(final RegionInfo regionInfo) {
     regionsMap.remove(regionInfo.getRegionName());
     // Remove from the offline regions map too if there.
     if (this.regionOffline.containsKey(regionInfo)) {
@@ -496,8 +496,8 @@ public class RegionStates {
     return regions;
   }
 
-  ArrayList<HRegionInfo> getTableRegionsInfo(final TableName tableName) {
-    final ArrayList<HRegionInfo> regions = new ArrayList<HRegionInfo>();
+  ArrayList<RegionInfo> getTableRegionsInfo(final TableName tableName) {
+    final ArrayList<RegionInfo> regions = new ArrayList<RegionInfo>();
     for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
       if (!node.getTable().equals(tableName)) break;
       regions.add(node.getRegionInfo());
@@ -520,7 +520,7 @@ public class RegionStates {
   // ==========================================================================
   //  RegionState helpers
   // ==========================================================================
-  public RegionState getRegionState(final HRegionInfo regionInfo) {
+  public RegionState getRegionState(final RegionInfo regionInfo) {
     return createRegionState(getRegionNode(regionInfo));
   }
 
@@ -542,13 +542,13 @@ public class RegionStates {
     return !getTableRegionStates(tableName).isEmpty();
   }
 
-  public List<HRegionInfo> getRegionsOfTable(final TableName table) {
+  public List<RegionInfo> getRegionsOfTable(final TableName table) {
     return getRegionsOfTable(table, false);
   }
 
-  List<HRegionInfo> getRegionsOfTable(final TableName table, final boolean offline) {
+  List<RegionInfo> getRegionsOfTable(final TableName table, final boolean offline) {
     final ArrayList<RegionStateNode> nodes = getTableRegionStateNodes(table);
-    final ArrayList<HRegionInfo> hris = new ArrayList<HRegionInfo>(nodes.size());
+    final ArrayList<RegionInfo> hris = new ArrayList<RegionInfo>(nodes.size());
     for (RegionStateNode node: nodes) {
       if (include(node, offline)) hris.add(node.getRegionInfo());
     }
@@ -567,7 +567,7 @@ public class RegionStates {
     }
     if (node.isInState(State.SPLIT)) return false;
     if (node.isInState(State.OFFLINE) && !offline) return false;
-    final HRegionInfo hri = node.getRegionInfo();
+    final RegionInfo hri = node.getRegionInfo();
     return (!hri.isOffline() && !hri.isSplit()) ||
         ((hri.isOffline() || hri.isSplit()) && offline);
   }
@@ -575,9 +575,9 @@ public class RegionStates {
   /**
    * Returns the set of regions hosted by the specified server
    * @param serverName the server we are interested in
-   * @return set of HRegionInfo hosted by the specified server
+   * @return set of RegionInfo hosted by the specified server
    */
-  public List<HRegionInfo> getServerRegionInfoSet(final ServerName serverName) {
+  public List<RegionInfo> getServerRegionInfoSet(final ServerName serverName) {
     final ServerStateNode serverInfo = getServerNode(serverName);
     if (serverInfo == null) return Collections.emptyList();
 
@@ -603,7 +603,7 @@ public class RegionStates {
     }
   }
 
-  public void logSplit(final HRegionInfo regionInfo) {
+  public void logSplit(final RegionInfo regionInfo) {
     final RegionStateNode regionNode = getRegionNode(regionInfo);
     synchronized (regionNode) {
       regionNode.setState(State.SPLIT);
@@ -611,7 +611,7 @@ public class RegionStates {
   }
 
   @VisibleForTesting
-  public void updateRegionState(final HRegionInfo regionInfo, final State state) {
+  public void updateRegionState(final RegionInfo regionInfo, final State state) {
     final RegionStateNode regionNode = getOrCreateRegionNode(regionInfo);
     synchronized (regionNode) {
       regionNode.setState(state);
@@ -621,8 +621,8 @@ public class RegionStates {
   // ============================================================================================
   //  TODO:
   // ============================================================================================
-  public List<HRegionInfo> getAssignedRegions() {
-    final List<HRegionInfo> result = new ArrayList<HRegionInfo>();
+  public List<RegionInfo> getAssignedRegions() {
+    final List<RegionInfo> result = new ArrayList<RegionInfo>();
     for (RegionStateNode node: regionsMap.values()) {
       if (!node.isInTransition()) {
         result.add(node.getRegionInfo());
@@ -631,7 +631,7 @@ public class RegionStates {
     return result;
   }
 
-  public boolean isRegionInState(final HRegionInfo regionInfo, final State... state) {
+  public boolean isRegionInState(final RegionInfo regionInfo, final State... state) {
     final RegionStateNode region = getRegionNode(regionInfo);
     if (region != null) {
       synchronized (region) {
@@ -641,21 +641,21 @@ public class RegionStates {
     return false;
   }
 
-  public boolean isRegionOnline(final HRegionInfo regionInfo) {
+  public boolean isRegionOnline(final RegionInfo regionInfo) {
     return isRegionInState(regionInfo, State.OPEN);
   }
 
   /**
    * @return True if region is offline (In OFFLINE or CLOSED state).
    */
-  public boolean isRegionOffline(final HRegionInfo regionInfo) {
+  public boolean isRegionOffline(final RegionInfo regionInfo) {
     return isRegionInState(regionInfo, State.OFFLINE, State.CLOSED);
   }
 
-  public Map<ServerName, List<HRegionInfo>> getSnapShotOfAssignment(
-      final Collection<HRegionInfo> regions) {
-    final Map<ServerName, List<HRegionInfo>> result = new HashMap<ServerName, List<HRegionInfo>>();
-    for (HRegionInfo hri: regions) {
+  public Map<ServerName, List<RegionInfo>> getSnapShotOfAssignment(
+      final Collection<RegionInfo> regions) {
+    final Map<ServerName, List<RegionInfo>> result = new HashMap<ServerName, List<RegionInfo>>();
+    for (RegionInfo hri: regions) {
       final RegionStateNode node = getRegionNode(hri);
       if (node == null) continue;
 
@@ -663,9 +663,9 @@ public class RegionStates {
       final ServerName serverName = node.getRegionLocation();
       if (serverName == null) continue;
 
-      List<HRegionInfo> serverRegions = result.get(serverName);
+      List<RegionInfo> serverRegions = result.get(serverName);
       if (serverRegions == null) {
-        serverRegions = new ArrayList<HRegionInfo>();
+        serverRegions = new ArrayList<RegionInfo>();
         result.put(serverName, serverRegions);
       }
 
@@ -674,20 +674,20 @@ public class RegionStates {
     return result;
   }
 
-  public Map<HRegionInfo, ServerName> getRegionAssignments() {
-    final HashMap<HRegionInfo, ServerName> assignments = new HashMap<HRegionInfo, ServerName>();
+  public Map<RegionInfo, ServerName> getRegionAssignments() {
+    final HashMap<RegionInfo, ServerName> assignments = new HashMap<RegionInfo, ServerName>();
     for (RegionStateNode node: regionsMap.values()) {
       assignments.put(node.getRegionInfo(), node.getRegionLocation());
     }
     return assignments;
   }
 
-  public Map<RegionState.State, List<HRegionInfo>> getRegionByStateOfTable(TableName tableName) {
+  public Map<RegionState.State, List<RegionInfo>> getRegionByStateOfTable(TableName tableName) {
     final State[] states = State.values();
-    final Map<RegionState.State, List<HRegionInfo>> tableRegions =
-        new HashMap<State, List<HRegionInfo>>(states.length);
+    final Map<RegionState.State, List<RegionInfo>> tableRegions =
+        new HashMap<State, List<RegionInfo>>(states.length);
     for (int i = 0; i < states.length; ++i) {
-      tableRegions.put(states[i], new ArrayList<HRegionInfo>());
+      tableRegions.put(states[i], new ArrayList<RegionInfo>());
     }
 
     for (RegionStateNode node: regionsMap.values()) {
@@ -698,7 +698,7 @@ public class RegionStates {
     return tableRegions;
   }
 
-  public ServerName getRegionServerOfRegion(final HRegionInfo regionInfo) {
+  public ServerName getRegionServerOfRegion(final RegionInfo regionInfo) {
     final RegionStateNode region = getRegionNode(regionInfo);
     if (region != null) {
       synchronized (region) {
@@ -717,29 +717,29 @@ public class RegionStates {
    * @param forceByCluster a flag to force to aggregate the server-load to the cluster level
    * @return A clone of current assignments by table.
    */
-  public Map<TableName, Map<ServerName, List<HRegionInfo>>> getAssignmentsByTable(
+  public Map<TableName, Map<ServerName, List<RegionInfo>>> getAssignmentsByTable(
       final boolean forceByCluster) {
     if (!forceByCluster) return getAssignmentsByTable();
 
-    final HashMap<ServerName, List<HRegionInfo>> ensemble =
-      new HashMap<ServerName, List<HRegionInfo>>(serverMap.size());
+    final HashMap<ServerName, List<RegionInfo>> ensemble =
+      new HashMap<ServerName, List<RegionInfo>>(serverMap.size());
     for (ServerStateNode serverNode: serverMap.values()) {
       ensemble.put(serverNode.getServerName(), serverNode.getRegionInfoList());
     }
 
     // TODO: can we use Collections.singletonMap(HConstants.ENSEMBLE_TABLE_NAME, ensemble)?
-    final Map<TableName, Map<ServerName, List<HRegionInfo>>> result =
-      new HashMap<TableName, Map<ServerName, List<HRegionInfo>>>(1);
+    final Map<TableName, Map<ServerName, List<RegionInfo>>> result =
+      new HashMap<TableName, Map<ServerName, List<RegionInfo>>>(1);
     result.put(HConstants.ENSEMBLE_TABLE_NAME, ensemble);
     return result;
   }
 
-  public Map<TableName, Map<ServerName, List<HRegionInfo>>> getAssignmentsByTable() {
-    final Map<TableName, Map<ServerName, List<HRegionInfo>>> result = new HashMap<>();
+  public Map<TableName, Map<ServerName, List<RegionInfo>>> getAssignmentsByTable() {
+    final Map<TableName, Map<ServerName, List<RegionInfo>>> result = new HashMap<>();
     for (RegionStateNode node: regionsMap.values()) {
-      Map<ServerName, List<HRegionInfo>> tableResult = result.get(node.getTable());
+      Map<ServerName, List<RegionInfo>> tableResult = result.get(node.getTable());
       if (tableResult == null) {
-        tableResult = new HashMap<ServerName, List<HRegionInfo>>();
+        tableResult = new HashMap<ServerName, List<RegionInfo>>();
         result.put(node.getTable(), tableResult);
       }
 
@@ -748,9 +748,9 @@ public class RegionStates {
         LOG.info("Skipping, no server for " + node);
         continue;
       }
-      List<HRegionInfo> serverResult = tableResult.get(serverName);
+      List<RegionInfo> serverResult = tableResult.get(serverName);
       if (serverResult == null) {
-        serverResult = new ArrayList<HRegionInfo>();
+        serverResult = new ArrayList<RegionInfo>();
         tableResult.put(serverName, serverResult);
       }
 
@@ -780,7 +780,7 @@ public class RegionStates {
     return !regionInTransition.isEmpty();
   }
 
-  public boolean isRegionInTransition(final HRegionInfo regionInfo) {
+  public boolean isRegionInTransition(final RegionInfo regionInfo) {
     final RegionStateNode node = regionInTransition.get(regionInfo);
     return node != null ? node.isInTransition() : false;
   }
@@ -788,13 +788,13 @@ public class RegionStates {
   /**
    * @return If a procedure-in-transition for <code>hri</code>, return it else null.
    */
-  public RegionTransitionProcedure getRegionTransitionProcedure(final HRegionInfo hri) {
+  public RegionTransitionProcedure getRegionTransitionProcedure(final RegionInfo hri) {
     RegionStateNode node = regionInTransition.get(hri);
     if (node == null) return null;
     return node.getProcedure();
   }
 
-  public RegionState getRegionTransitionState(final HRegionInfo hri) {
+  public RegionState getRegionTransitionState(final RegionInfo hri) {
     RegionStateNode node = regionInTransition.get(hri);
     if (node == null) return null;
 
@@ -840,7 +840,7 @@ public class RegionStates {
   }
 
   // TODO: Unused.
-  public void removeFromOfflineRegions(final HRegionInfo regionInfo) {
+  public void removeFromOfflineRegions(final RegionInfo regionInfo) {
     regionOffline.remove(regionInfo);
   }
 
@@ -861,7 +861,7 @@ public class RegionStates {
       return regionNode;
     }
 
-    public HRegionInfo getRegionInfo() {
+    public RegionInfo getRegionInfo() {
       return regionNode.getRegionInfo();
     }
 
@@ -893,11 +893,11 @@ public class RegionStates {
     return node;
   }
 
-  public RegionFailedOpen getFailedOpen(final HRegionInfo regionInfo) {
+  public RegionFailedOpen getFailedOpen(final RegionInfo regionInfo) {
     return regionFailedOpen.get(regionInfo.getRegionName());
   }
 
-  public void removeFromFailedOpen(final HRegionInfo regionInfo) {
+  public void removeFromFailedOpen(final RegionInfo regionInfo) {
     regionFailedOpen.remove(regionInfo.getRegionName());
   }
 

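The RegionStates hunks above stop relying on HRegionInfo's natural ordering: RegionStateNode.compareTo(), the RegionState comparator, and the region-in-transition map now order keys with the explicit RegionInfo.COMPARATOR. A self-contained sketch of that ordering, with invented table and keys:

import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionInfoComparatorSketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("t1");   // hypothetical
    RegionInfo a = RegionInfoBuilder.newBuilder(table)
        .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("m")).build();
    RegionInfo b = RegionInfoBuilder.newBuilder(table)
        .setStartKey(Bytes.toBytes("m")).setEndKey(Bytes.toBytes("z")).build();

    // The comparator sorts by table first, which RegionStateNode.compareTo() relies on,
    // then by region boundaries and id.
    System.out.println(RegionInfo.COMPARATOR.compare(a, b));  // negative: a before b

    // Maps keyed by RegionInfo are given the comparator explicitly.
    ConcurrentSkipListMap<RegionInfo, String> inTransition =
        new ConcurrentSkipListMap<>(RegionInfo.COMPARATOR);
    inTransition.put(a, "nodeA");
    inTransition.put(b, "nodeB");
    System.out.println(inTransition.firstKey().getRegionNameAsString());
  }
}
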
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
index 9a10e2b..6f54dcf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
@@ -24,10 +24,9 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -36,6 +35,8 @@ import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 
@@ -89,7 +90,7 @@ public abstract class RegionTransitionProcedure
 
   private RegionTransitionState transitionState =
       RegionTransitionState.REGION_TRANSITION_QUEUE;
-  private HRegionInfo regionInfo;
+  private RegionInfo regionInfo;
   private volatile boolean lock = false;
 
   public RegionTransitionProcedure() {
@@ -97,22 +98,22 @@ public abstract class RegionTransitionProcedure
     super();
   }
 
-  public RegionTransitionProcedure(final HRegionInfo regionInfo) {
+  public RegionTransitionProcedure(final RegionInfo regionInfo) {
     this.regionInfo = regionInfo;
   }
 
-  public HRegionInfo getRegionInfo() {
+  public RegionInfo getRegionInfo() {
     return regionInfo;
   }
 
-  protected void setRegionInfo(final HRegionInfo regionInfo) {
+  protected void setRegionInfo(final RegionInfo regionInfo) {
     // Setter is for deserialization.
     this.regionInfo = regionInfo;
   }
 
   @Override
   public TableName getTableName() {
-    HRegionInfo hri = getRegionInfo();
+    RegionInfo hri = getRegionInfo();
     return hri != null? hri.getTable(): null;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index f67aa5b..cbd334e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -40,12 +40,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -72,6 +73,7 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
@@ -86,8 +88,8 @@ public class SplitTableRegionProcedure
     extends AbstractStateMachineRegionProcedure<SplitTableRegionState> {
   private static final Log LOG = LogFactory.getLog(SplitTableRegionProcedure.class);
   private Boolean traceEnabled = null;
-  private HRegionInfo daughter_1_HRI;
-  private HRegionInfo daughter_2_HRI;
+  private RegionInfo daughter_1_RI;
+  private RegionInfo daughter_2_RI;
   private byte[] bestSplitRow;
 
   public SplitTableRegionProcedure() {
@@ -95,14 +97,24 @@ public class SplitTableRegionProcedure
   }
 
   public SplitTableRegionProcedure(final MasterProcedureEnv env,
-      final HRegionInfo regionToSplit, final byte[] splitRow) throws IOException {
+      final RegionInfo regionToSplit, final byte[] splitRow) throws IOException {
     super(env, regionToSplit);
     this.bestSplitRow = splitRow;
     checkSplittable(env, regionToSplit, bestSplitRow);
     final TableName table = regionToSplit.getTable();
     final long rid = getDaughterRegionIdTimestamp(regionToSplit);
-    this.daughter_1_HRI = new HRegionInfo(table, regionToSplit.getStartKey(), bestSplitRow, false, rid);
-    this.daughter_2_HRI = new HRegionInfo(table, bestSplitRow, regionToSplit.getEndKey(), false, rid);
+    this.daughter_1_RI = RegionInfoBuilder.newBuilder(table)
+        .setStartKey(regionToSplit.getStartKey())
+        .setEndKey(bestSplitRow)
+        .setSplit(false)
+        .setRegionId(rid)
+        .build();
+    this.daughter_2_RI = RegionInfoBuilder.newBuilder(table)
+        .setStartKey(bestSplitRow)
+        .setEndKey(regionToSplit.getEndKey())
+        .setSplit(false)
+        .setRegionId(rid)
+        .build();
   }
 
   /**
@@ -113,10 +125,10 @@ public class SplitTableRegionProcedure
    * @throws IOException
    */
   private void checkSplittable(final MasterProcedureEnv env,
-      final HRegionInfo regionToSplit, final byte[] splitRow) throws IOException {
+      final RegionInfo regionToSplit, final byte[] splitRow) throws IOException {
     // Ask the remote RS if this region is splittable.
     // If we get an IOE, report it along w/ the failure so can see why we are not splittable at this time.
-    if(regionToSplit.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+    if(regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
       throw new IllegalArgumentException ("Can't invoke split on non-default regions directly");
     }
     RegionStateNode node =
@@ -169,10 +181,10 @@ public class SplitTableRegionProcedure
 
   /**
    * Calculate daughter regionid to use.
-   * @param hri Parent {@link HRegionInfo}
+   * @param hri Parent {@link RegionInfo}
    * @return Daughter region id (timestamp) to use.
    */
-  private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) {
+  private static long getDaughterRegionIdTimestamp(final RegionInfo hri) {
     long rid = EnvironmentEdgeManager.currentTime();
     // Regionid is timestamp.  Can't be less than that of parent else will insert
     // at wrong location in hbase:meta (See HBASE-710).
@@ -332,9 +344,9 @@ public class SplitTableRegionProcedure
     final MasterProcedureProtos.SplitTableRegionStateData.Builder splitTableRegionMsg =
         MasterProcedureProtos.SplitTableRegionStateData.newBuilder()
         .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
-        .setParentRegionInfo(HRegionInfo.convert(getRegion()))
-        .addChildRegionInfo(HRegionInfo.convert(daughter_1_HRI))
-        .addChildRegionInfo(HRegionInfo.convert(daughter_2_HRI));
+        .setParentRegionInfo(ProtobufUtil.toRegionInfo(getRegion()))
+        .addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_1_RI))
+        .addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_2_RI));
     serializer.serialize(splitTableRegionMsg.build());
   }
 
@@ -346,10 +358,10 @@ public class SplitTableRegionProcedure
     final MasterProcedureProtos.SplitTableRegionStateData splitTableRegionsMsg =
         serializer.deserialize(MasterProcedureProtos.SplitTableRegionStateData.class);
     setUser(MasterProcedureUtil.toUserInfo(splitTableRegionsMsg.getUserInfo()));
-    setRegion(HRegionInfo.convert(splitTableRegionsMsg.getParentRegionInfo()));
+    setRegion(ProtobufUtil.toRegionInfo(splitTableRegionsMsg.getParentRegionInfo()));
     assert(splitTableRegionsMsg.getChildRegionInfoCount() == 2);
-    daughter_1_HRI = HRegionInfo.convert(splitTableRegionsMsg.getChildRegionInfo(0));
-    daughter_2_HRI = HRegionInfo.convert(splitTableRegionsMsg.getChildRegionInfo(1));
+    daughter_1_RI = ProtobufUtil.toRegionInfo(splitTableRegionsMsg.getChildRegionInfo(0));
+    daughter_2_RI = ProtobufUtil.toRegionInfo(splitTableRegionsMsg.getChildRegionInfo(1));
   }
 
   @Override
@@ -360,12 +372,12 @@ public class SplitTableRegionProcedure
     sb.append(", parent=");
     sb.append(getParentRegion().getShortNameToLog());
     sb.append(", daughterA=");
-    sb.append(daughter_1_HRI.getShortNameToLog());
+    sb.append(daughter_1_RI.getShortNameToLog());
     sb.append(", daughterB=");
-    sb.append(daughter_2_HRI.getShortNameToLog());
+    sb.append(daughter_2_RI.getShortNameToLog());
   }
 
-  private HRegionInfo getParentRegion() {
+  private RegionInfo getParentRegion() {
     return getRegion();
   }
 
@@ -380,7 +392,7 @@ public class SplitTableRegionProcedure
   }
 
   private byte[] getSplitRow() {
-    return daughter_2_HRI.getStartKey();
+    return daughter_2_RI.getStartKey();
   }
 
   private static State [] EXPECTED_SPLIT_STATES = new State [] {State.OPEN, State.CLOSED};
@@ -394,7 +406,7 @@ public class SplitTableRegionProcedure
     // Check whether the region is splittable
     RegionStateNode node =
       env.getAssignmentManager().getRegionStates().getRegionNode(getParentRegion());
-    HRegionInfo parentHRI = null;
+    RegionInfo parentHRI = null;
     if (node != null) {
       parentHRI = node.getRegionInfo();
 
@@ -479,7 +491,7 @@ public class SplitTableRegionProcedure
 
     final AssignProcedure[] procs = new AssignProcedure[regionReplication];
     for (int i = 0; i < regionReplication; ++i) {
-      final HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(getParentRegion(), i);
+      final RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(getParentRegion(), i);
       procs[i] = env.getAssignmentManager().createAssignProcedure(hri, serverName);
     }
     env.getMasterServices().getMasterProcedureExecutor().submitProcedures(procs);
@@ -502,17 +514,17 @@ public class SplitTableRegionProcedure
     Pair<Integer, Integer> expectedReferences = splitStoreFiles(env, regionFs);
 
     assertReferenceFileCount(fs, expectedReferences.getFirst(),
-      regionFs.getSplitsDir(daughter_1_HRI));
+      regionFs.getSplitsDir(daughter_1_RI));
     //Move the files from the temporary .splits to the final /table/region directory
-    regionFs.commitDaughterRegion(daughter_1_HRI);
+    regionFs.commitDaughterRegion(daughter_1_RI);
     assertReferenceFileCount(fs, expectedReferences.getFirst(),
-      new Path(tabledir, daughter_1_HRI.getEncodedName()));
+      new Path(tabledir, daughter_1_RI.getEncodedName()));
 
     assertReferenceFileCount(fs, expectedReferences.getSecond(),
-      regionFs.getSplitsDir(daughter_2_HRI));
-    regionFs.commitDaughterRegion(daughter_2_HRI);
+      regionFs.getSplitsDir(daughter_2_RI));
+    regionFs.commitDaughterRegion(daughter_2_RI);
     assertReferenceFileCount(fs, expectedReferences.getSecond(),
-      new Path(tabledir, daughter_2_HRI.getEncodedName()));
+      new Path(tabledir, daughter_2_RI.getEncodedName()));
   }
 
   /**
@@ -650,9 +662,9 @@ public class SplitTableRegionProcedure
     final byte[] splitRow = getSplitRow();
     final String familyName = Bytes.toString(family);
     final Path path_first =
-        regionFs.splitStoreFile(this.daughter_1_HRI, familyName, sf, splitRow, false, null);
+        regionFs.splitStoreFile(this.daughter_1_RI, familyName, sf, splitRow, false, null);
     final Path path_second =
-        regionFs.splitStoreFile(this.daughter_2_HRI, familyName, sf, splitRow, true, null);
+        regionFs.splitStoreFile(this.daughter_2_RI, familyName, sf, splitRow, true, null);
     if (LOG.isDebugEnabled()) {
       LOG.debug("pid=" + getProcId() + " splitting complete for store file: " +
           sf.getPath() + " for region: " + getParentRegion().getShortNameToLog());
@@ -702,7 +714,7 @@ public class SplitTableRegionProcedure
       }
       try {
         for (Mutation p : metaEntries) {
-          HRegionInfo.parseRegionName(p.getRow());
+          RegionInfo.parseRegionName(p.getRow());
         }
       } catch (IOException e) {
         LOG.error("pid=" + getProcId() + " row key of mutation from coprocessor not parsable as "
@@ -720,7 +732,7 @@ public class SplitTableRegionProcedure
    */
   private void updateMetaForDaughterRegions(final MasterProcedureEnv env) throws IOException {
     env.getAssignmentManager().markRegionAsSplit(getParentRegion(), getParentRegionServerName(env),
-      daughter_1_HRI, daughter_2_HRI);
+      daughter_1_RI, daughter_2_RI);
   }
 
   /**
@@ -742,7 +754,7 @@ public class SplitTableRegionProcedure
   private void postSplitRegion(final MasterProcedureEnv env) throws IOException {
     final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
     if (cpHost != null) {
-      cpHost.postCompletedSplitRegionAction(daughter_1_HRI, daughter_2_HRI, getUser());
+      cpHost.postCompletedSplitRegionAction(daughter_1_RI, daughter_2_RI, getUser());
     }
   }
 
@@ -755,7 +767,7 @@ public class SplitTableRegionProcedure
       final int regionReplication) {
     final UnassignProcedure[] procs = new UnassignProcedure[regionReplication];
     for (int i = 0; i < procs.length; ++i) {
-      final HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(getParentRegion(), i);
+      final RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(getParentRegion(), i);
       procs[i] = env.getAssignmentManager().createUnassignProcedure(hri, null, true);
     }
     return procs;
@@ -767,11 +779,11 @@ public class SplitTableRegionProcedure
     final AssignProcedure[] procs = new AssignProcedure[regionReplication * 2];
     int procsIdx = 0;
     for (int i = 0; i < regionReplication; ++i) {
-      final HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(daughter_1_HRI, i);
+      final RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(daughter_1_RI, i);
       procs[procsIdx++] = env.getAssignmentManager().createAssignProcedure(hri, targetServer);
     }
     for (int i = 0; i < regionReplication; ++i) {
-      final HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(daughter_2_HRI, i);
+      final RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(daughter_2_RI, i);
       procs[procsIdx++] = env.getAssignmentManager().createAssignProcedure(hri, targetServer);
     }
     return procs;

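For readers tracking the API migration rather than the patch hunks themselves: in SplitTableRegionProcedure the daughter descriptors are no longer constructed with the old HRegionInfo constructor but assembled through RegionInfoBuilder, and protobuf (de)serialization goes through ProtobufUtil.toRegionInfo instead of HRegionInfo.convert. Below is a minimal, self-contained sketch of the builder pattern only; it is not part of the patch, and the table name, split row and region id are made-up placeholders rather than values computed by the procedure.

    // Illustrative sketch, not part of the commit: building the two daughter
    // RegionInfo descriptors for a split with the RegionInfoBuilder API.
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DaughterRegionSketch {
      public static void main(String[] args) {
        TableName table = TableName.valueOf("demo");   // hypothetical table
        byte[] splitRow = Bytes.toBytes("m");          // hypothetical split point
        long rid = System.currentTimeMillis();         // region id is a timestamp

        // Daughter A covers [startKey, splitRow), daughter B covers [splitRow, endKey).
        RegionInfo daughterA = RegionInfoBuilder.newBuilder(table)
            .setStartKey(HConstants.EMPTY_START_ROW)
            .setEndKey(splitRow)
            .setSplit(false)
            .setRegionId(rid)
            .build();
        RegionInfo daughterB = RegionInfoBuilder.newBuilder(table)
            .setStartKey(splitRow)
            .setEndKey(HConstants.EMPTY_END_ROW)
            .setSplit(false)
            .setRegionId(rid)
            .build();

        System.out.println(daughterA.getRegionNameAsString());
        System.out.println(daughterB.getRegionNameAsString());
      }
    }

The builder keeps the same fields the old constructor took (start key, end key, split flag, region id) but returns an immutable RegionInfo, which is why the procedure now holds daughter_1_RI / daughter_2_RI instead of the former daughter_*_HRI fields.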
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
index 4cb6368..66277be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
@@ -20,29 +20,31 @@
 package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.master.procedure.ServerCrashException;
 import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionCloseOperation;
-import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.master.procedure.ServerCrashException;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
+import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 
 /**
  * Procedure that describes the unassignment of a single region.
@@ -90,12 +92,12 @@ public class UnassignProcedure extends RegionTransitionProcedure {
     super();
   }
 
-  public UnassignProcedure(final HRegionInfo regionInfo,  final ServerName hostingServer,
+  public UnassignProcedure(final RegionInfo regionInfo,  final ServerName hostingServer,
                            final boolean force) {
     this(regionInfo, hostingServer, null, force);
   }
 
-  public UnassignProcedure(final HRegionInfo regionInfo,
+  public UnassignProcedure(final RegionInfo regionInfo,
       final ServerName hostingServer, final ServerName destinationServer, final boolean force) {
     super(regionInfo);
     this.hostingServer = hostingServer;
@@ -128,7 +130,7 @@ public class UnassignProcedure extends RegionTransitionProcedure {
     UnassignRegionStateData.Builder state = UnassignRegionStateData.newBuilder()
         .setTransitionState(getTransitionState())
         .setHostingServer(ProtobufUtil.toServerName(this.hostingServer))
-        .setRegionInfo(HRegionInfo.convert(getRegionInfo()));
+        .setRegionInfo(ProtobufUtil.toRegionInfo(getRegionInfo()));
     if (this.destinationServer != null) {
       state.setDestinationServer(ProtobufUtil.toServerName(destinationServer));
     }
@@ -144,7 +146,7 @@ public class UnassignProcedure extends RegionTransitionProcedure {
     final UnassignRegionStateData state =
         serializer.deserialize(UnassignRegionStateData.class);
     setTransitionState(state.getTransitionState());
-    setRegionInfo(HRegionInfo.convert(state.getRegionInfo()));
+    setRegionInfo(ProtobufUtil.toRegionInfo(state.getRegionInfo()));
     this.hostingServer = ProtobufUtil.toServerName(state.getHostingServer());
     force = state.getForce();
     if (state.hasDestinationServer()) {

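The same serialization change repeats in UnassignProcedure: HRegionInfo.convert(...) used to handle both directions of the protobuf conversion, and it is replaced by the overloaded ProtobufUtil.toRegionInfo(...). A small illustrative round trip follows; it is a sketch rather than code from the patch, and the table name is a placeholder.

    // Illustrative sketch, not part of the commit: the protobuf round trip that
    // replaces HRegionInfo.convert(...) in serializeStateData/deserializeStateData.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

    public class RegionInfoPbSketch {
      public static void main(String[] args) {
        RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();

        // Java object -> protobuf message (what the new serializeStateData path does).
        HBaseProtos.RegionInfo pb = ProtobufUtil.toRegionInfo(ri);

        // protobuf message -> Java object (what the new deserializeStateData path does).
        RegionInfo back = ProtobufUtil.toRegionInfo(pb);

        System.out.println(back.getRegionNameAsString().equals(ri.getRegionNameAsString()));
      }
    }

Keeping both directions on a single overloaded ProtobufUtil method is what lets the procedure state messages (UnassignRegionStateData, SplitTableRegionStateData) drop their dependency on the deprecated HRegionInfo class entirely.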
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java
index 156fe7a..e6b1495 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java
@@ -19,11 +19,12 @@ package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -43,13 +44,13 @@ class Util {
    * @throws IOException Let it out so can report this IOE as reason for failure
    */
   static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
-      final ServerName regionLocation, final HRegionInfo hri)
+      final ServerName regionLocation, final RegionInfo hri)
   throws IOException {
     return getRegionInfoResponse(env, regionLocation, hri, false);
   }
 
   static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
-      final ServerName regionLocation, final HRegionInfo hri, boolean includeBestSplitRow)
+      final ServerName regionLocation, final RegionInfo hri, boolean includeBestSplitRow)
   throws IOException {
     // TODO: There is no timeout on this controller. Set one!
     HBaseRpcController controller = env.getMasterServices().getClusterConnection().