Posted to commits@hbase.apache.org by ch...@apache.org on 2017/09/28 12:30:36 UTC

[06/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
index 1acfcde..6307210 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
@@ -34,14 +34,13 @@ import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -227,13 +226,13 @@ public class TestAsyncClusterAdminApi extends TestAsyncAdminBase {
     // Check if regions match with the regionLoad from the server
     Collection<ServerName> servers = admin.getRegionServers().get();
     for (ServerName serverName : servers) {
-      List<HRegionInfo> regions = admin.getOnlineRegions(serverName).get();
+      List<RegionInfo> regions = admin.getOnlineRegions(serverName).get();
       checkRegionsAndRegionLoads(regions, admin.getRegionLoads(serverName).get());
     }
 
     // Check if regionLoad matches the table's regions and nothing is missed
     for (TableName table : tables) {
-      List<HRegionInfo> tableRegions = admin.getTableRegions(table).get();
+      List<RegionInfo> tableRegions = admin.getTableRegions(table).get();
       List<RegionLoad> regionLoads = Lists.newArrayList();
       for (ServerName serverName : servers) {
         regionLoads.addAll(admin.getRegionLoads(serverName, Optional.of(table)).get());
@@ -268,7 +267,7 @@ public class TestAsyncClusterAdminApi extends TestAsyncAdminBase {
     }
   }
 
-  private void checkRegionsAndRegionLoads(Collection<HRegionInfo> regions,
+  private void checkRegionsAndRegionLoads(Collection<RegionInfo> regions,
       Collection<RegionLoad> regionLoads) {
 
     assertEquals("No of regions and regionloads doesn't match", regions.size(), regionLoads.size());
@@ -277,7 +276,7 @@ public class TestAsyncClusterAdminApi extends TestAsyncAdminBase {
     for (RegionLoad regionLoad : regionLoads) {
       regionLoadMap.put(regionLoad.getName(), regionLoad);
     }
-    for (HRegionInfo info : regions) {
+    for (RegionInfo info : regions) {
       assertTrue("Region not in regionLoadMap region:" + info.getRegionNameAsString()
           + " regionMap: " + regionLoadMap, regionLoadMap.containsKey(info.getRegionName()));
     }

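[Not part of the patch: a minimal illustrative sketch of the API shift the hunks above exercise. After HBASE-18839, AsyncAdmin hands back the org.apache.hadoop.hbase.client.RegionInfo interface instead of the deprecated HRegionInfo class. The connection and method names below ("conn", "listOnlineRegions") are assumptions for illustration only.]

import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  static void listOnlineRegions(AsyncConnection conn, ServerName serverName) throws Exception {
    AsyncAdmin admin = conn.getAdmin();
    // getOnlineRegions now completes with List<RegionInfo> rather than List<HRegionInfo>
    List<RegionInfo> regions = admin.getOnlineRegions(serverName).get();
    for (RegionInfo info : regions) {
      System.out.println(info.getRegionNameAsString());
    }
  }
}
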
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
index 6292b10..9775b86 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -33,7 +33,6 @@ import java.util.stream.Collectors;
 
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -45,7 +44,6 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -58,6 +56,8 @@ import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
 /**
  * Class to test asynchronous region admin operations.
  */
@@ -69,10 +69,10 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
   public void testCloseRegion() throws Exception {
     createTableWithDefaultConf(tableName);
 
-    HRegionInfo info = null;
+    RegionInfo info = null;
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName);
-    List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
-    for (HRegionInfo regionInfo : onlineRegions) {
+    List<RegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
+    for (RegionInfo regionInfo : onlineRegions) {
       if (!regionInfo.getTable().isSystemTable()) {
         info = regionInfo;
         boolean closed = admin.closeRegion(regionInfo.getRegionName(),
@@ -94,10 +94,10 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
   public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception {
     createTableWithDefaultConf(tableName);
 
-    HRegionInfo info = null;
+    RegionInfo info = null;
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName);
-    List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
-    for (HRegionInfo regionInfo : onlineRegions) {
+    List<RegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
+    for (RegionInfo regionInfo : onlineRegions) {
       if (!regionInfo.isMetaTable()) {
         if (regionInfo.getRegionNameAsString().contains(tableName.getNameAsString())) {
           info = regionInfo;
@@ -123,8 +123,8 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     createTableWithDefaultConf(tableName);
 
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName);
-    List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
-    for (HRegionInfo regionInfo : onlineRegions) {
+    List<RegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
+    for (RegionInfo regionInfo : onlineRegions) {
       if (!regionInfo.isMetaTable()) {
         if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegionWhenServerNameIsEmpty")) {
           admin.closeRegion(regionInfo.getRegionName(), Optional.empty()).get();
@@ -139,7 +139,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
     AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(tableName);
     HRegionLocation regionLocation = locator.getRegionLocation(Bytes.toBytes("mmm")).get();
-    HRegionInfo region = regionLocation.getRegionInfo();
+    RegionInfo region = regionLocation.getRegionInfo();
     byte[] regionName = regionLocation.getRegionInfo().getRegionName();
     HRegionLocation location = rawAdmin.getRegionLocation(regionName).get();
     assertTrue(Bytes.equals(regionName, location.getRegionInfo().getRegionName()));
@@ -154,7 +154,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     // assign region.
     HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
     AssignmentManager am = master.getAssignmentManager();
-    HRegionInfo hri = am.getRegionStates().getRegionsOfTable(tableName).get(0);
+    RegionInfo hri = am.getRegionStates().getRegionsOfTable(tableName).get(0);
 
     // assert region on server
     RegionStates regionStates = am.getRegionStates();
@@ -184,7 +184,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     assertTrue(regionStates.getRegionState(hri).isClosed());
   }
 
-  HRegionInfo createTableAndGetOneRegion(final TableName tableName)
+  RegionInfo createTableAndGetOneRegion(final TableName tableName)
       throws IOException, InterruptedException, ExecutionException {
     TableDescriptor desc =
         TableDescriptorBuilder.newBuilder(tableName)
@@ -195,7 +195,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
     long timeoutTime = System.currentTimeMillis() + 3000;
     while (true) {
-      List<HRegionInfo> regions =
+      List<RegionInfo> regions =
           master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
       if (regions.size() > 3) {
         return regions.get(2);
@@ -215,7 +215,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
   // Will cause the Master to tell the regionserver to shut itself down because
   // regionserver is reporting the state as OPEN.
   public void testOfflineRegion() throws Exception {
-    HRegionInfo hri = createTableAndGetOneRegion(tableName);
+    RegionInfo hri = createTableAndGetOneRegion(tableName);
 
     RegionStates regionStates =
         TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
@@ -224,7 +224,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     long timeoutTime = System.currentTimeMillis() + 3000;
     while (true) {
       if (regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OFFLINE)
-          .contains(hri)) break;
+          .stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) break;
       long now = System.currentTimeMillis();
       if (now > timeoutTime) {
         fail("Failed to offline the region in time");
@@ -238,21 +238,21 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
 
   @Test
   public void testGetRegionByStateOfTable() throws Exception {
-    HRegionInfo hri = createTableAndGetOneRegion(tableName);
+    RegionInfo hri = createTableAndGetOneRegion(tableName);
 
     RegionStates regionStates =
         TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
     assertTrue(regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OPEN)
-        .contains(hri));
+        .stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0));
     assertFalse(regionStates.getRegionByStateOfTable(TableName.valueOf("I_am_the_phantom"))
-        .get(RegionState.State.OPEN).contains(hri));
+        .get(RegionState.State.OPEN).stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0));
   }
 
   @Test
   public void testMoveRegion() throws Exception {
     admin.setBalancerOn(false).join();
 
-    HRegionInfo hri = createTableAndGetOneRegion(tableName);
+    RegionInfo hri = createTableAndGetOneRegion(tableName);
     RawAsyncHBaseAdmin rawAdmin = (RawAsyncHBaseAdmin) ASYNC_CONN.getAdmin();
     ServerName serverName = rawAdmin.getRegionLocation(hri.getRegionName()).get().getServerName();
 
@@ -312,7 +312,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
 
   @Test
   public void testFlushTableAndRegion() throws Exception {
-    HRegionInfo hri = createTableAndGetOneRegion(tableName);
+    RegionInfo hri = createTableAndGetOneRegion(tableName);
     ServerName serverName =
         TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
             .getRegionServerOfRegion(hri);
@@ -405,7 +405,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
 
     // Merge switch is off so merge should NOT succeed.
     assertTrue(admin.setMergeOn(false).get());
-    List<HRegionInfo> regions = admin.getTableRegions(tableName).get();
+    List<RegionInfo> regions = admin.getTableRegions(tableName).get();
     assertTrue(regions.size() > 1);
     admin.mergeRegions(regions.get(0).getRegionName(), regions.get(1).getRegionName(), true).join();
     int count = admin.getTableRegions(tableName).get().size();
@@ -437,8 +437,8 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     RawAsyncTable metaTable = ASYNC_CONN.getRawTable(META_TABLE_NAME);
     List<HRegionLocation> regionLocations =
         AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, Optional.of(tableName)).get();
-    HRegionInfo regionA;
-    HRegionInfo regionB;
+    RegionInfo regionA;
+    RegionInfo regionB;
 
     // merge with full name
     assertEquals(3, regionLocations.size());

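[Not part of the patch: the hunks above replace List.contains(hri) with an explicit RegionInfo.COMPARATOR check, presumably because RegionInfo is an interface and different implementations need not be equal() to one another. A minimal helper sketch of that pattern; the class and method names are assumptions.]

import java.util.List;
import org.apache.hadoop.hbase.client.RegionInfo;

final class RegionInfoLists {
  static boolean containsRegion(List<RegionInfo> regions, RegionInfo hri) {
    // compare via RegionInfo.COMPARATOR instead of relying on equals()
    return regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0);
  }
}
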
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
index dec28f3..b4af697 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
@@ -598,10 +597,10 @@ public class TestBlockEvictionFromClient {
       region.flush(true);
       LOG.info("About to SPLIT on " + Bytes.toString(ROW1));
       TEST_UTIL.getAdmin().split(tableName, ROW1);
-      List<HRegionInfo> tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName);
+      List<RegionInfo> tableRegions = TEST_UTIL.getAdmin().getRegions(tableName);
       // Wait for splits
       while (tableRegions.size() != 2) {
-        tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName);
+        tableRegions = TEST_UTIL.getAdmin().getRegions(tableName);
         Thread.sleep(100);
         LOG.info("Waiting on SPLIT to complete...");
       }
@@ -1520,7 +1519,7 @@ public class TestBlockEvictionFromClient {
     }
 
     @Override
-    public HRegionInfo getRegionInfo() {
+    public RegionInfo getRegionInfo() {
       return delegate.getRegionInfo();
     }
 

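[Not part of the patch: a sketch of the replacement call shown above, where Admin.getRegions(TableName) supersedes the HRegionInfo-based getTableRegions(TableName). "admin" and "tableName" are assumptions; this simply polls until a split has produced two daughter regions, as the test does.]

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

final class WaitForSplitSketch {
  static void waitForTwoRegions(Admin admin, TableName tableName) throws Exception {
    List<RegionInfo> tableRegions = admin.getRegions(tableName);
    while (tableRegions.size() != 2) {
      // keep polling until both daughter regions show up
      Thread.sleep(100);
      tableRegions = admin.getRegions(tableName);
    }
  }
}
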
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 6f590d1..98d864b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
@@ -107,7 +106,7 @@ public class TestMetaWithReplicas {
     });
     l.setBalancerOn(false);
     for (int replicaId = 1; replicaId < 3; replicaId ++) {
-      HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO,
+      RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO,
         replicaId);
       try {
         TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().waitForAssignment(h);
@@ -189,7 +188,7 @@ public class TestMetaWithReplicas {
         util.getAdmin().flush(TableName.META_TABLE_NAME);
         Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
             30000) * 6);
-        List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(c, TABLE);
+        List<RegionInfo> regions = MetaTableAccessor.getTableRegions(c, TABLE);
         HRegionLocation hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
         // Ensure that the primary server for test table is not the same one as the primary
         // of the meta region since we will be killing the srv holding the meta's primary...
@@ -420,7 +419,7 @@ public class TestMetaWithReplicas {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     TEST_UTIL.createTable(tableName, "f");
     assertTrue(TEST_UTIL.getAdmin().tableExists(tableName));
-    TEST_UTIL.getAdmin().move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
+    TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
         Bytes.toBytes(moveToServer.getServerName()));
     int i = 0;
     assert !moveToServer.equals(currentServer);
@@ -463,8 +462,8 @@ public class TestMetaWithReplicas {
   @Ignore @Test // Disabled because fsck and this needs work for AMv2
   public void testHBaseFsckWithExcessMetaReplicas() throws Exception {
     // Create a meta replica (this will be the 4th one) and assign it
-    HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
-        HRegionInfo.FIRST_META_REGIONINFO, 3);
+    RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
+        RegionInfoBuilder.FIRST_META_REGIONINFO, 3);
     TEST_UTIL.assignRegion(h);
     HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getAdmin(), h);
     // check that problem exists

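[Not part of the patch: the FIRST_META_REGIONINFO constant moves from HRegionInfo to RegionInfoBuilder. A minimal sketch of deriving a meta replica's RegionInfo the way the hunks above do; the class and method names are assumptions.]

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

final class MetaReplicaSketch {
  static RegionInfo metaReplica(int replicaId) {
    // replicaId 0 is the primary meta region; 1..n address the additional replicas
    return RegionReplicaUtil.getRegionInfoForReplica(
        RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId);
  }
}
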
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java
index eccb1f7..82f4952 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertNull;
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -83,7 +82,7 @@ public class TestMvccConsistentScanner {
   }
 
   private void move() throws IOException, InterruptedException {
-    HRegionInfo region =
+    RegionInfo region =
         UTIL.getHBaseCluster().getRegions(tableName).stream().findAny().get().getRegionInfo();
     HRegionServer rs =
         UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer())

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
index 3ff0f1f..8c5dbae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
@@ -46,10 +46,11 @@ import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -114,7 +115,7 @@ public class TestCoprocessorInterface {
     }
 
     @Override
-    public HRegionInfo getRegionInfo() {
+    public RegionInfo getRegionInfo() {
       return delegate.getRegionInfo();
     }
 
@@ -376,7 +377,7 @@ public class TestCoprocessorInterface {
 
   Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
       throws IOException {
-    //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
+    //RegionInfo info = new RegionInfo(tableName, null, null, false);
     Region r = HRegion.openHRegion(closedRegion, null);
 
     // this following piece is a hack. currently a coprocessorHost
@@ -409,7 +410,11 @@ public class TestCoprocessorInterface {
       htd.addFamily(new HColumnDescriptor(family));
     }
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
-    HRegionInfo info = new HRegionInfo(tableName, null, null, false);
+    RegionInfo info = RegionInfoBuilder.newBuilder(tableName)
+        .setStartKey(null)
+        .setEndKey(null)
+        .setSplit(false)
+        .build();
     Path path = new Path(DIR + callingMethod);
     Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
 

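[Not part of the patch: RegionInfo instances are now built through RegionInfoBuilder rather than the HRegionInfo constructor. A minimal sketch mirroring the hunk above; "tableName" and the wrapper class are assumptions.]

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

final class BuildRegionInfoSketch {
  static RegionInfo wholeTableRegion(TableName tableName) {
    return RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(null)   // null start/end keys cover the whole key space
        .setEndKey(null)
        .setSplit(false)
        .build();
  }
}
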
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index b038d9d..2759a68 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -19,6 +19,11 @@
 
 package org.apache.hadoop.hbase.coprocessor;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -33,7 +38,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -64,10 +68,6 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -79,10 +79,10 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
 
 
 /**
@@ -1927,10 +1927,10 @@ public class TestMasterObserver {
       byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName().toString());
       //Make sure no regions are in transition now
       UTIL.waitUntilNoRegionsInTransition();
-      List<HRegionInfo> openRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
+      List<RegionInfo> openRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
       int moveCnt = openRegions.size()/2;
       for (int i=0; i<moveCnt; i++) {
-        HRegionInfo info = openRegions.get(i);
+        RegionInfo info = openRegions.get(i);
         if (!info.isMetaTable()) {
           master.getMasterRpcServices().moveRegion(null, RequestConverter.buildMoveRegionRequest(
               openRegions.get(i).getEncodedNameAsBytes(), destRS));

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index 2666340..88e548a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -53,6 +52,7 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -668,7 +668,7 @@ public class TestRegionObserverInterface {
         if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) {
           continue;
         }
-        for (HRegionInfo r : ProtobufUtil
+        for (RegionInfo r : ProtobufUtil
             .getOnlineRegions(t.getRegionServer().getRSRpcServices())) {
           if (!r.getTable().equals(tableName)) {
             continue;

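[Not part of the patch: ProtobufUtil.getOnlineRegions(...) now returns RegionInfo as well. A sketch of filtering one table's online regions on a region server, as the hunk above does; "rs" and the helper class are assumptions.]

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

final class OnlineRegionsSketch {
  static void printRegionsOf(HRegionServer rs, TableName tableName) throws Exception {
    List<RegionInfo> online = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
    for (RegionInfo r : online) {
      if (r.getTable().equals(tableName)) {   // keep only regions of the table we care about
        System.out.println(r.getRegionNameAsString());
      }
    }
  }
}
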
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 0446f61..2c9ab20 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -262,7 +263,7 @@ public class TestRegionObserverScannerOpenHook {
 
     @SuppressWarnings("deprecation")
     public CompactionCompletionNotifyingRegion(Path tableDir, WAL log,
-        FileSystem fs, Configuration confParam, HRegionInfo info,
+        FileSystem fs, Configuration confParam, RegionInfo info,
         TableDescriptor htd, RegionServerServices rsServices) {
       super(tableDir, log, fs, confParam, info, htd, rsServices);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java
index 8d0fe19..24bb4bd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java
@@ -35,9 +35,10 @@ import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -160,16 +161,16 @@ public class TestFavoredNodeAssignmentHelper {
     Map<String,Integer> rackToServerCount = new HashMap<>();
     rackToServerCount.put("rack1", 10);
     // have lots of regions to test with
-    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
+    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
       primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
     FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
-    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
-    List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
+    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
+    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
         helper.placeSecondaryAndTertiaryRS(primaryRSMap);
     // although we created lots of regions we should have no overlap on the
     // primary/secondary/tertiary for any given region
-    for (HRegionInfo region : regions) {
+    for (RegionInfo region : regions) {
       ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
       assertNotNull(secondaryAndTertiaryServers);
       assertTrue(primaryRSMap.containsKey(region));
@@ -185,13 +186,13 @@ public class TestFavoredNodeAssignmentHelper {
     // the primary can be assigned but the secondary/tertiary would be null
     Map<String,Integer> rackToServerCount = new HashMap<>();
     rackToServerCount.put("rack1", 1);
-    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
+    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
       primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
     FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
-    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
-    List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
+    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
+    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
 
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
         helper.placeSecondaryAndTertiaryRS(primaryRSMap);
     // no secondary/tertiary placement in case of a single RegionServer
     assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
@@ -205,18 +206,18 @@ public class TestFavoredNodeAssignmentHelper {
     rackToServerCount.put("rack1", 10);
     rackToServerCount.put("rack2", 10);
 
-    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
+    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
       primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
     FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
-    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
+    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
 
     assertTrue(primaryRSMap.size() == 60000);
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
         helper.placeSecondaryAndTertiaryRS(primaryRSMap);
     assertTrue(secondaryAndTertiaryMap.size() == 60000);
     // for every region, the primary should be on one rack and the secondary/tertiary
     // on another (we create a lot of regions just to increase probability of failure)
-    for (Map.Entry<HRegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
+    for (Map.Entry<RegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
       ServerName[] allServersForRegion = entry.getValue();
       String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
       String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
@@ -235,15 +236,15 @@ public class TestFavoredNodeAssignmentHelper {
     Map<String,Integer> rackToServerCount = new HashMap<>();
     rackToServerCount.put("rack1", 1);
     rackToServerCount.put("rack2", 1);
-    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
+    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
       primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
     FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
-    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
-    List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
+    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
+    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
     assertTrue(primaryRSMap.size() == 6);
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
           helper.placeSecondaryAndTertiaryRS(primaryRSMap);
-    for (HRegionInfo region : regions) {
+    for (RegionInfo region : regions) {
       // not enough secondary/tertiary room to place the regions
       assertTrue(secondaryAndTertiaryMap.get(region) == null);
     }
@@ -259,16 +260,16 @@ public class TestFavoredNodeAssignmentHelper {
     Map<String,Integer> rackToServerCount = new HashMap<>();
     rackToServerCount.put("rack1", 2);
     rackToServerCount.put("rack2", 1);
-    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
+    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
       primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
     FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
-    Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
-    List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
+    Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
+    List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
     assertTrue(primaryRSMap.size() == 6);
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
+    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
           helper.placeSecondaryAndTertiaryRS(primaryRSMap);
     assertTrue(secondaryAndTertiaryMap.size() == regions.size());
-    for (HRegionInfo region : regions) {
+    for (RegionInfo region : regions) {
       ServerName s = primaryRSMap.get(region);
       ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
       ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
@@ -279,28 +280,29 @@ public class TestFavoredNodeAssignmentHelper {
     }
   }
 
-  private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
+  private Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
   secondaryAndTertiaryRSPlacementHelper(
       int regionCount, Map<String, Integer> rackToServerCount) {
-    Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
+    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
     List<ServerName> servers = getServersFromRack(rackToServerCount);
     FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
-    Map<ServerName, List<HRegionInfo>> assignmentMap =
-        new HashMap<ServerName, List<HRegionInfo>>();
+    Map<ServerName, List<RegionInfo>> assignmentMap =
+        new HashMap<ServerName, List<RegionInfo>>();
     helper.initialize();
     // create regions
-    List<HRegionInfo> regions = new ArrayList<>(regionCount);
+    List<RegionInfo> regions = new ArrayList<>(regionCount);
     for (int i = 0; i < regionCount; i++) {
-      HRegionInfo region = new HRegionInfo(TableName.valueOf(name.getMethodName()),
-          Bytes.toBytes(i), Bytes.toBytes(i + 1));
-      regions.add(region);
+      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+          .setStartKey(Bytes.toBytes(i))
+          .setEndKey(Bytes.toBytes(i + 1))
+          .build());
     }
     // place the regions
     helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
     return new Triple<>(primaryRSMap, helper, regions);
   }
 
-  private void primaryRSPlacement(int regionCount, Map<HRegionInfo, ServerName> primaryRSMap,
+  private void primaryRSPlacement(int regionCount, Map<RegionInfo, ServerName> primaryRSMap,
       int firstRackSize, int secondRackSize, int thirdRackSize) {
     Map<String,Integer> rackToServerCount = new HashMap<>();
     rackToServerCount.put("rack1", firstRackSize);
@@ -313,14 +315,15 @@ public class TestFavoredNodeAssignmentHelper {
 
     assertTrue(helper.canPlaceFavoredNodes());
 
-    Map<ServerName, List<HRegionInfo>> assignmentMap = new HashMap<>();
+    Map<ServerName, List<RegionInfo>> assignmentMap = new HashMap<>();
     if (primaryRSMap == null) primaryRSMap = new HashMap<>();
     // create some regions
-    List<HRegionInfo> regions = new ArrayList<>(regionCount);
+    List<RegionInfo> regions = new ArrayList<>(regionCount);
     for (int i = 0; i < regionCount; i++) {
-      HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
-          Bytes.toBytes(i), Bytes.toBytes(i + 1));
-      regions.add(region);
+      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf("foobar"))
+          .setStartKey(Bytes.toBytes(i))
+          .setEndKey(Bytes.toBytes(i + 1))
+          .build());
     }
     // place those regions in primary RSs
     helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
@@ -329,7 +332,7 @@ public class TestFavoredNodeAssignmentHelper {
     int regionsOnRack1 = 0;
     int regionsOnRack2 = 0;
     int regionsOnRack3 = 0;
-    for (HRegionInfo region : regions) {
+    for (RegionInfo region : regions) {
       if (rackManager.getRack(primaryRSMap.get(region)).equals("rack1")) {
         regionsOnRack1++;
       } else if (rackManager.getRack(primaryRSMap.get(region)).equals("rack2")) {
@@ -346,7 +349,7 @@ public class TestFavoredNodeAssignmentHelper {
 
   private void checkNumRegions(int regionCount, int firstRackSize, int secondRackSize,
       int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3,
-      Map<ServerName, List<HRegionInfo>> assignmentMap) {
+      Map<ServerName, List<RegionInfo>> assignmentMap) {
     //The regions should be distributed proportionately to the racksizes
     //Verify the ordering was as expected by inserting the racks and regions
     //in sorted maps. The keys being the racksize and numregions; values are
@@ -387,18 +390,19 @@ public class TestFavoredNodeAssignmentHelper {
     helper.initialize();
     assertTrue(helper.canPlaceFavoredNodes());
 
-    List<HRegionInfo> regions = new ArrayList<>(20);
+    List<RegionInfo> regions = new ArrayList<>(20);
     for (int i = 0; i < 20; i++) {
-      HRegionInfo region = new HRegionInfo(TableName.valueOf(name.getMethodName()),
-          Bytes.toBytes(i), Bytes.toBytes(i + 1));
-      regions.add(region);
+      regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+          .setStartKey(Bytes.toBytes(i))
+          .setEndKey(Bytes.toBytes(i + 1))
+          .build());
     }
-    Map<ServerName, List<HRegionInfo>> assignmentMap =
-        new HashMap<ServerName, List<HRegionInfo>>();
-    Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
+    Map<ServerName, List<RegionInfo>> assignmentMap =
+        new HashMap<ServerName, List<RegionInfo>>();
+    Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
     helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
     assertTrue(primaryRSMap.size() == regions.size());
-    Map<HRegionInfo, ServerName[]> secondaryAndTertiary =
+    Map<RegionInfo, ServerName[]> secondaryAndTertiary =
         helper.placeSecondaryAndTertiaryRS(primaryRSMap);
     assertEquals(regions.size(), secondaryAndTertiary.size());
   }
@@ -534,8 +538,10 @@ public class TestFavoredNodeAssignmentHelper {
     helper.initialize();
     assertTrue(helper.canPlaceFavoredNodes());
 
-    HRegionInfo region = new HRegionInfo(TableName.valueOf(name.getMethodName()),
-        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
+    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+        .setStartKey(HConstants.EMPTY_START_ROW)
+        .setEndKey(HConstants.EMPTY_END_ROW)
+        .build();
 
     for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) {
       List<ServerName> fn = helper.generateFavoredNodes(region);

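[Not part of the patch: a sketch of the builder pattern the favored-nodes test hunks above use to create a run of adjacent regions; the table name, region count, and helper class are assumptions.]

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class RegionRunSketch {
  static List<RegionInfo> makeRegions(TableName table, int regionCount) {
    List<RegionInfo> regions = new ArrayList<>(regionCount);
    for (int i = 0; i < regionCount; i++) {
      // each region spans [i, i + 1), so consecutive regions are contiguous
      regions.add(RegionInfoBuilder.newBuilder(table)
          .setStartKey(Bytes.toBytes(i))
          .setEndKey(Bytes.toBytes(i + 1))
          .build());
    }
    return regions;
  }
}
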
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index e96ce6d..fda3563 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -17,13 +17,14 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import static org.mockito.Mockito.mock;
+
 import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -31,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
@@ -53,8 +55,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
 import com.google.protobuf.Service;
 
-import static org.mockito.Mockito.mock;
-
 public class MockNoopMasterServices implements MasterServices, Server {
   private final Configuration conf;
   private final MetricsMaster metricsMaster;
@@ -309,7 +309,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
 
   @Override
   public long mergeRegions(
-      final HRegionInfo[] regionsToMerge,
+      final RegionInfo[] regionsToMerge,
       final boolean forcible,
       final long nonceGroup,
       final long nonce) throws IOException {
@@ -318,7 +318,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
 
   @Override
   public long splitRegion(
-      final HRegionInfo regionInfo,
+      final RegionInfo regionInfo,
       final byte[] splitRow,
       final long nonceGroup,
       final long nonce) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 450bf8e..f777067 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -36,11 +36,12 @@ import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.locking.EntityLock;
@@ -436,7 +437,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   public GetRegionInfoResponse getRegionInfo(RpcController controller,
       GetRegionInfoRequest request) throws ServiceException {
     GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
-    builder.setRegionInfo(HRegionInfo.convert(HRegionInfo.FIRST_META_REGIONINFO));
+    builder.setRegionInfo(ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO));
     return builder.build();
   }
 
@@ -558,7 +559,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
-  public WAL getWAL(HRegionInfo regionInfo) throws IOException {
+  public WAL getWAL(RegionInfo regionInfo) throws IOException {
     return null;
   }
 
@@ -608,13 +609,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
-  public boolean reportRegionStateTransition(TransitionCode code, HRegionInfo... hris) {
+  public boolean reportRegionStateTransition(TransitionCode code, RegionInfo... hris) {
     return false;
   }
 
   @Override
   public boolean reportRegionStateTransition(TransitionCode code, long openSeqNum,
-      HRegionInfo... hris) {
+      RegionInfo... hris) {
     return false;
   }
 
@@ -674,7 +675,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
-  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description, Abortable abort)
+  public EntityLock regionLock(List<RegionInfo> regionInfos, String description, Abortable abort)
       throws IOException {
     return null;
   }

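[Not part of the patch: HRegionInfo.convert(...) gives way to ProtobufUtil.toRegionInfo(...) for the shaded protobuf types. A minimal sketch of filling a GetRegionInfoResponse as the MockRegionServer hunk above does; the AdminProtos import path and the helper class are assumptions.]

import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;

final class RegionInfoPbSketch {
  static GetRegionInfoResponse metaRegionInfoResponse() {
    GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
    // toRegionInfo converts the client-side RegionInfo into its shaded protobuf form
    builder.setRegionInfo(ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO));
    return builder.build();
  }
}
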
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java
index eaa2edb..4c4a8ed 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java
@@ -32,13 +32,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -97,14 +97,14 @@ public class TestAssignmentListener {
     }
 
     @Override
-    public void regionOpened(final HRegionInfo regionInfo, final ServerName serverName) {
+    public void regionOpened(final RegionInfo regionInfo, final ServerName serverName) {
       LOG.info("Assignment open region=" + regionInfo + " server=" + serverName);
       openCount.incrementAndGet();
       modified.incrementAndGet();
     }
 
     @Override
-    public void regionClosed(final HRegionInfo regionInfo) {
+    public void regionClosed(final RegionInfo regionInfo) {
       LOG.info("Assignment close region=" + regionInfo);
       closeCount.incrementAndGet();
       modified.incrementAndGet();
@@ -267,7 +267,7 @@ public class TestAssignmentListener {
       // Merge the two regions
       LOG.info("Merge Regions");
       listener.reset();
-      List<HRegionInfo> regions = admin.getTableRegions(tableName);
+      List<RegionInfo> regions = admin.getRegions(tableName);
       assertEquals(2, regions.size());
       boolean sameServer = areAllRegionsLocatedOnSameServer(tableName);
       // If the regions are located by different server, we need to move

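[Not part of the patch: the AssignmentListener callbacks now receive RegionInfo. A minimal counting-listener sketch in the spirit of the test above, assuming the interface declares only these two callbacks; the class and field names are assumptions.]

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.AssignmentListener;

class CountingAssignmentListener implements AssignmentListener {
  final AtomicInteger openCount = new AtomicInteger();
  final AtomicInteger closeCount = new AtomicInteger();

  @Override
  public void regionOpened(RegionInfo regionInfo, ServerName serverName) {
    openCount.incrementAndGet();   // a region was opened on serverName
  }

  @Override
  public void regionClosed(RegionInfo regionInfo) {
    closeCount.incrementAndGet();  // a region was closed
  }
}
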
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
index 19415bd..28ed6a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
@@ -18,40 +18,45 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaMockingUtil;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-
 import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-
 @Category({MasterTests.class, MediumTests.class})
 public class TestCatalogJanitorInMemoryStates {
   private static final Log LOG = LogFactory.getLog(TestCatalogJanitorInMemoryStates.class);
@@ -129,7 +134,7 @@ public class TestCatalogJanitorInMemoryStates {
  * @return List of region locations
  * @throws IOException, InterruptedException
  */
-  private List<HRegionLocation> splitRegion(final HRegionInfo r)
+  private List<HRegionLocation> splitRegion(final RegionInfo r)
       throws IOException, InterruptedException {
     List<HRegionLocation> locations = new ArrayList<>();
     // Split this table in two.
@@ -137,7 +142,7 @@ public class TestCatalogJanitorInMemoryStates {
     Connection connection = TEST_UTIL.getConnection();
     admin.splitRegion(r.getEncodedNameAsBytes());
     admin.close();
-    PairOfSameType<HRegionInfo> regions = waitOnDaughters(r);
+    PairOfSameType<RegionInfo> regions = waitOnDaughters(r);
     if (regions != null) {
       try (RegionLocator rl = connection.getRegionLocator(r.getTable())) {
         locations.add(rl.getRegionLocation(regions.getFirst().getEncodedNameAsBytes()));
@@ -154,20 +159,20 @@ public class TestCatalogJanitorInMemoryStates {
    * @param r
    * @return Daughter regions; caller needs to check table actually split.
    */
-  private PairOfSameType<HRegionInfo> waitOnDaughters(final HRegionInfo r)
+  private PairOfSameType<RegionInfo> waitOnDaughters(final RegionInfo r)
       throws IOException {
     long start = System.currentTimeMillis();
-    PairOfSameType<HRegionInfo> pair = null;
+    PairOfSameType<RegionInfo> pair = null;
     try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
          Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) {
       Result result = null;
-      HRegionInfo region = null;
+      RegionInfo region = null;
       while ((System.currentTimeMillis() - start) < 60000) {
         result = metaTable.get(new Get(r.getRegionName()));
         if (result == null) {
           break;
         }
-        region = MetaTableAccessor.getHRegionInfo(result);
+        region = MetaTableAccessor.getRegionInfo(result);
         if (region.isSplitParent()) {
           LOG.debug(region.toString() + " IS a parent!");
           pair = MetaTableAccessor.getDaughterRegions(result);
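
The hunks above swap MetaTableAccessor.getHRegionInfo(Result) for MetaTableAccessor.getRegionInfo(Result) and type the results as the RegionInfo interface. A minimal stand-alone sketch of that lookup pattern follows; the class name and the null handling are illustrative only and not part of the patch.

import java.io.IOException;

import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public class RegionInfoFromMetaSketch {
  /** Reads the RegionInfo stored in hbase:meta for the given region name, or null if absent. */
  static RegionInfo lookup(Connection connection, byte[] regionName) throws IOException {
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      Result result = meta.get(new Get(regionName));
      if (result == null || result.isEmpty()) {
        return null;
      }
      // Replacement for the removed MetaTableAccessor.getHRegionInfo(result) helper.
      return MetaTableAccessor.getRegionInfo(result);
    }
  }
}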

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 4763169..8fb76c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -77,6 +76,8 @@ import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.NonceGenerator;
 import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
@@ -85,13 +86,11 @@ import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
 import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -102,6 +101,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALSplitter;
@@ -120,6 +120,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
 @Category({MasterTests.class, LargeTests.class})
 @SuppressWarnings("deprecation")
 public class TestDistributedLogSplitting {
@@ -232,13 +234,13 @@ public class TestDistributedLogSplitting {
         "table", "family", 40);
     try {
       TableName table = t.getName();
-      List<HRegionInfo> regions = null;
+      List<RegionInfo> regions = null;
       HRegionServer hrs = null;
       for (int i = 0; i < NUM_RS; i++) {
         boolean foundRs = false;
         hrs = rsts.get(i).getRegionServer();
         regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
-        for (HRegionInfo region : regions) {
+        for (RegionInfo region : regions) {
           if (region.getTable().getNameAsString().equalsIgnoreCase("table")) {
             foundRs = true;
             break;
@@ -250,9 +252,9 @@ public class TestDistributedLogSplitting {
           .getServerName().toString()));
 
       LOG.info("#regions = " + regions.size());
-      Iterator<HRegionInfo> it = regions.iterator();
+      Iterator<RegionInfo> it = regions.iterator();
       while (it.hasNext()) {
-        HRegionInfo region = it.next();
+        RegionInfo region = it.next();
         if (region.getTable().getNamespaceAsString()
             .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
           it.remove();
@@ -264,7 +266,7 @@ public class TestDistributedLogSplitting {
       slm.splitLogDistributed(logDir);
 
       int count = 0;
-      for (HRegionInfo hri : regions) {
+      for (RegionInfo hri : regions) {
 
         Path tdir = FSUtils.getTableDir(rootdir, table);
         Path editsdir =
@@ -315,7 +317,7 @@ public class TestDistributedLogSplitting {
     Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
     try {
       HRegionServer hrs = findRSToKill(false, "table");
-      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+      List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
       makeWAL(hrs, regions, "table", "family", NUM_LOG_LINES, 100);
 
       // wait for abort completes
@@ -374,8 +376,8 @@ public class TestDistributedLogSplitting {
       List<Increment> reqs = new ArrayList<>();
       for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
         HRegionServer hrs = rst.getRegionServer();
-        List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
-        for (HRegionInfo hri : hris) {
+        List<RegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+        for (RegionInfo hri : hris) {
           if (TABLE_NAME.equalsIgnoreCase(hri.getTable().getNameAsString())) {
             byte[] key = hri.getStartKey();
             if (key == null || key.length == 0) {
@@ -424,7 +426,7 @@ public class TestDistributedLogSplitting {
     Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
     try {
       HRegionServer hrs = findRSToKill(true, "table");
-      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+      List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
       makeWAL(hrs, regions, "table", "family", NUM_LOG_LINES, 100);
 
       this.abortRSAndVerifyRecovery(hrs, ht, zkw, NUM_REGIONS_TO_CREATE, NUM_LOG_LINES);
@@ -494,7 +496,7 @@ public class TestDistributedLogSplitting {
     Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
     try {
       HRegionServer hrs = findRSToKill(false, "table");
-      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+      List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
       makeWAL(hrs, regions, "table", "family", NUM_LOG_LINES, 100);
 
       // abort master
@@ -552,7 +554,7 @@ public class TestDistributedLogSplitting {
     Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
     try {
       HRegionServer hrs = findRSToKill(false, "table");
-      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+      List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
       makeWAL(hrs, regions, "table", "family", NUM_LOG_LINES, 100);
 
       // abort master
@@ -613,7 +615,7 @@ public class TestDistributedLogSplitting {
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
     Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
     try {
-      List<HRegionInfo> regions = null;
+      List<RegionInfo> regions = null;
       HRegionServer hrs1 = findRSToKill(false, "table");
       regions = ProtobufUtil.getOnlineRegions(hrs1.getRSRpcServices());
 
@@ -694,14 +696,14 @@ public class TestDistributedLogSplitting {
     try {
       final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
 
-      Set<HRegionInfo> regionSet = new HashSet<>();
-      HRegionInfo region = null;
+      Set<RegionInfo> regionSet = new HashSet<>();
+      RegionInfo region = null;
       HRegionServer hrs = null;
       ServerName firstFailedServer = null;
       ServerName secondFailedServer = null;
       for (int i = 0; i < NUM_RS; i++) {
         hrs = rsts.get(i).getRegionServer();
-        List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+        List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
         if (regions.isEmpty()) continue;
         region = regions.get(0);
         regionSet.add(region);
@@ -746,13 +748,13 @@ public class TestDistributedLogSplitting {
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
     Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
     try {
-      List<HRegionInfo> regions = null;
+      List<RegionInfo> regions = null;
       HRegionServer hrs = null;
       for (int i = 0; i < NUM_RS; i++) {
         boolean isCarryingMeta = false;
         hrs = rsts.get(i).getRegionServer();
         regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
-        for (HRegionInfo region : regions) {
+        for (RegionInfo region : regions) {
           if (region.isMetaRegion()) {
             isCarryingMeta = true;
             break;
@@ -795,7 +797,7 @@ public class TestDistributedLogSplitting {
       // they will consume recovered.edits
       master.balanceSwitch(false);
 
-      List<HRegionInfo> regions = null;
+      List<RegionInfo> regions = null;
       HRegionServer hrs = null;
       boolean hasRegionsForBothTables = false;
       String tableName = null;
@@ -805,7 +807,7 @@ public class TestDistributedLogSplitting {
         boolean isCarryingSystem = false;
         hrs = rsts.get(i).getRegionServer();
         regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
-        for (HRegionInfo region : regions) {
+        for (RegionInfo region : regions) {
           if (region.getTable().isSystemTable()) {
             isCarryingSystem = true;
             break;
@@ -831,9 +833,9 @@ public class TestDistributedLogSplitting {
       Assert.assertTrue(hasRegionsForBothTables);
 
       LOG.info("#regions = " + regions.size());
-      Iterator<HRegionInfo> it = regions.iterator();
+      Iterator<RegionInfo> it = regions.iterator();
       while (it.hasNext()) {
-        HRegionInfo region = it.next();
+        RegionInfo region = it.next();
         if (region.isMetaTable()) {
           it.remove();
         }
@@ -882,7 +884,7 @@ public class TestDistributedLogSplitting {
       FileSystem fs = master.getMasterFileSystem().getFileSystem();
       Path rootdir = FSUtils.getRootDir(conf);
       Path tdir = FSUtils.getTableDir(rootdir, TableName.valueOf(name.getMethodName()));
-      for (HRegionInfo hri : regions) {
+      for (RegionInfo hri : regions) {
         Path editsdir =
             WALSplitter.getRegionDirRecoveredEditsDir(
                 HRegion.getRegionDir(tdir, hri.getEncodedName()));
@@ -912,7 +914,7 @@ public class TestDistributedLogSplitting {
       assertEquals(NUM_LOG_LINES, TEST_UTIL.countRows(ht));
 
       // clean up
-      for (HRegionInfo hri : regions) {
+      for (RegionInfo hri : regions) {
         Path editsdir =
             WALSplitter.getRegionDirRecoveredEditsDir(
                 HRegion.getRegionDir(tdir, hri.getEncodedName()));
@@ -943,13 +945,13 @@ public class TestDistributedLogSplitting {
     try {
       final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
 
-      Set<HRegionInfo> regionSet = new HashSet<>();
-      HRegionInfo region = null;
+      Set<RegionInfo> regionSet = new HashSet<>();
+      RegionInfo region = null;
       HRegionServer hrs = null;
       HRegionServer dstRS = null;
       for (int i = 0; i < NUM_RS; i++) {
         hrs = rsts.get(i).getRegionServer();
-        List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+        List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
         if (regions.isEmpty()) continue;
         region = regions.get(0);
         regionSet.add(region);
@@ -959,7 +961,7 @@ public class TestDistributedLogSplitting {
 
       slm.markRegionsRecovering(hrs.getServerName(), regionSet);
       // move region in order for the region opened in recovering state
-      final HRegionInfo hri = region;
+      final RegionInfo hri = region;
       final HRegionServer tmpRS = dstRS;
       TEST_UTIL.getAdmin().move(region.getEncodedNameAsBytes(),
           Bytes.toBytes(dstRS.getServerName().getServerName()));
@@ -1212,20 +1214,20 @@ public class TestDistributedLogSplitting {
 
     // only testing meta recovery in ZK operation
     HRegionServer hrs = findRSToKill(true, null);
-    List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+    List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
 
     LOG.info("#regions = " + regions.size());
-    Set<HRegionInfo> tmpRegions = new HashSet<>();
-    tmpRegions.add(HRegionInfo.FIRST_META_REGIONINFO);
+    Set<RegionInfo> tmpRegions = new HashSet<>();
+    tmpRegions.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
     master.getMasterWalManager().prepareLogReplay(hrs.getServerName(), tmpRegions);
-    Set<HRegionInfo> userRegionSet = new HashSet<>();
+    Set<RegionInfo> userRegionSet = new HashSet<>();
     userRegionSet.addAll(regions);
     master.getMasterWalManager().prepareLogReplay(hrs.getServerName(), userRegionSet);
     boolean isMetaRegionInRecovery = false;
     List<String> recoveringRegions =
         zkw.getRecoverableZooKeeper().getChildren(zkw.znodePaths.recoveringRegionsZNode, false);
     for (String curEncodedRegionName : recoveringRegions) {
-      if (curEncodedRegionName.equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
+      if (curEncodedRegionName.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName())) {
         isMetaRegionInRecovery = true;
         break;
       }
@@ -1238,7 +1240,7 @@ public class TestDistributedLogSplitting {
     recoveringRegions =
         zkw.getRecoverableZooKeeper().getChildren(zkw.znodePaths.recoveringRegionsZNode, false);
     for (String curEncodedRegionName : recoveringRegions) {
-      if (curEncodedRegionName.equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
+      if (curEncodedRegionName.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName())) {
         isMetaRegionInRecovery = true;
         break;
       }
@@ -1265,13 +1267,13 @@ public class TestDistributedLogSplitting {
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
     Table ht = installTable(zkw, name.getMethodName(), "family", NUM_REGIONS_TO_CREATE);
     try {
-      List<HRegionInfo> regions = null;
+      List<RegionInfo> regions = null;
       HRegionServer hrs = null;
       for (int i = 0; i < NUM_RS; i++) {
         boolean isCarryingMeta = false;
         hrs = rsts.get(i).getRegionServer();
         regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
-        for (HRegionInfo region : regions) {
+        for (RegionInfo region : regions) {
           if (region.isMetaRegion()) {
             isCarryingMeta = true;
             break;
@@ -1284,17 +1286,17 @@ public class TestDistributedLogSplitting {
       }
 
       LOG.info("#regions = " + regions.size());
-      Iterator<HRegionInfo> it = regions.iterator();
+      Iterator<RegionInfo> it = regions.iterator();
       while (it.hasNext()) {
-        HRegionInfo region = it.next();
+        RegionInfo region = it.next();
         if (region.isMetaTable()
             || region.getEncodedName().equals(
-            HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
+            RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName())) {
           it.remove();
         }
       }
       if (regions.isEmpty()) return;
-      HRegionInfo curRegionInfo = regions.get(0);
+      RegionInfo curRegionInfo = regions.get(0);
       byte[] startRow = curRegionInfo.getStartKey();
       if (startRow == null || startRow.length == 0) {
         startRow = new byte[] { 0, 0, 0, 0, 1 };
@@ -1361,13 +1363,13 @@ public class TestDistributedLogSplitting {
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
     Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
     try {
-      List<HRegionInfo> regions = null;
+      List<RegionInfo> regions = null;
       HRegionServer hrs = null;
       for (int i = 0; i < NUM_RS; i++) {
         boolean isCarryingMeta = false;
         hrs = rsts.get(i).getRegionServer();
         regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
-        for (HRegionInfo region : regions) {
+        for (RegionInfo region : regions) {
           if (region.isMetaRegion()) {
             isCarryingMeta = true;
             break;
@@ -1380,16 +1382,16 @@ public class TestDistributedLogSplitting {
       }
 
       LOG.info("#regions = " + regions.size());
-      Iterator<HRegionInfo> it = regions.iterator();
+      Iterator<RegionInfo> it = regions.iterator();
       while (it.hasNext()) {
-        HRegionInfo region = it.next();
+        RegionInfo region = it.next();
         if (region.isMetaTable()
-            || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
+            || region.getEncodedName().equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName())) {
           it.remove();
         }
       }
       if (regions.isEmpty()) return;
-      HRegionInfo curRegionInfo = regions.get(0);
+      RegionInfo curRegionInfo = regions.get(0);
       byte[] startRow = curRegionInfo.getStartKey();
       if (startRow == null || startRow.length == 0) {
         startRow = new byte[] { 0, 0, 0, 0, 1 };
@@ -1532,8 +1534,8 @@ public class TestDistributedLogSplitting {
 
     for (RegionServerThread rst : rsts) {
       HRegionServer hrs = rst.getRegionServer();
-      List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
-      for (HRegionInfo hri : hris) {
+      List<RegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+      for (RegionInfo hri : hris) {
         if (hri.getTable().isSystemTable()) {
           continue;
         }
@@ -1547,14 +1549,14 @@ public class TestDistributedLogSplitting {
 
     for (MasterThread mt : cluster.getLiveMasterThreads()) {
       HRegionServer hrs = mt.getMaster();
-      List<HRegionInfo> hris;
+      List<RegionInfo> hris;
       try {
         hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
       } catch (ServerNotRunningYetException e) {
         // It's ok: this master may be a backup. Ignored.
         continue;
       }
-      for (HRegionInfo hri : hris) {
+      for (RegionInfo hri : hris) {
         if (hri.getTable().isSystemTable()) {
           continue;
         }
@@ -1567,22 +1569,22 @@ public class TestDistributedLogSplitting {
     }
   }
 
-  public void makeWAL(HRegionServer hrs, List<HRegionInfo> regions, String tname, String fname,
+  public void makeWAL(HRegionServer hrs, List<RegionInfo> regions, String tname, String fname,
       int num_edits, int edit_size) throws IOException {
     makeWAL(hrs, regions, tname, fname, num_edits, edit_size, true);
   }
 
-  public void makeWAL(HRegionServer hrs, List<HRegionInfo> regions, String tname, String fname,
+  public void makeWAL(HRegionServer hrs, List<RegionInfo> regions, String tname, String fname,
       int num_edits, int edit_size, boolean cleanShutdown) throws IOException {
     TableName fullTName = TableName.valueOf(tname);
     // remove root and meta region
-    regions.remove(HRegionInfo.FIRST_META_REGIONINFO);
+    regions.remove(RegionInfoBuilder.FIRST_META_REGIONINFO);
     // using one sequenceId for edits across all regions is ok.
     final AtomicLong sequenceId = new AtomicLong(10);
 
 
-    for(Iterator<HRegionInfo> iter = regions.iterator(); iter.hasNext(); ) {
-      HRegionInfo regionInfo = iter.next();
+    for(Iterator<RegionInfo> iter = regions.iterator(); iter.hasNext(); ) {
+      RegionInfo regionInfo = iter.next();
       if(regionInfo.getTable().isSystemTable()) {
         iter.remove();
       }
@@ -1592,8 +1594,8 @@ public class TestDistributedLogSplitting {
     htd.addFamily(new HColumnDescriptor(family));
     byte[] value = new byte[edit_size];
 
-    List<HRegionInfo> hris = new ArrayList<>();
-    for (HRegionInfo region : regions) {
+    List<RegionInfo> hris = new ArrayList<>();
+    for (RegionInfo region : regions) {
       if (!region.getTable().getNameAsString().equalsIgnoreCase(tname)) {
         continue;
       }
@@ -1610,7 +1612,7 @@ public class TestDistributedLogSplitting {
     if (n > 0) {
       for (int i = 0; i < num_edits; i += 1) {
         WALEdit e = new WALEdit();
-        HRegionInfo curRegionInfo = hris.get(i % n);
+        RegionInfo curRegionInfo = hris.get(i % n);
         final WAL log = hrs.getWAL(curRegionInfo);
         byte[] startRow = curRegionInfo.getStartKey();
         if (startRow == null || startRow.length == 0) {
@@ -1633,12 +1635,12 @@ public class TestDistributedLogSplitting {
     }
     // done as two passes because the regions might share logs. shutdown is idempotent, but sync
     // will cause errors if done after.
-    for (HRegionInfo info : hris) {
+    for (RegionInfo info : hris) {
       final WAL log = hrs.getWAL(info);
       log.sync();
     }
     if (cleanShutdown) {
-      for (HRegionInfo info : hris) {
+      for (RegionInfo info : hris) {
         final WAL log = hrs.getWAL(info);
         log.shutdown();
       }
@@ -1754,7 +1756,7 @@ public class TestDistributedLogSplitting {
    */
   private HRegionServer findRSToKill(boolean hasMetaRegion, String tableName) throws Exception {
     List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
-    List<HRegionInfo> regions = null;
+    List<RegionInfo> regions = null;
     HRegionServer hrs = null;
 
     for (RegionServerThread rst: rsts) {
@@ -1768,7 +1770,7 @@ public class TestDistributedLogSplitting {
       boolean isCarryingMeta = false;
       boolean foundTableRegion = false;
       regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
-      for (HRegionInfo region : regions) {
+      for (RegionInfo region : regions) {
         if (region.isMetaRegion()) {
           isCarryingMeta = true;
         }
@@ -1784,9 +1786,9 @@ public class TestDistributedLogSplitting {
         if (!foundTableRegion) {
           final HRegionServer destRS = hrs;
           // the RS doesn't have regions of the specified table so we need move one to this RS
-          List<HRegionInfo> tableRegions =
-              TEST_UTIL.getAdmin().getTableRegions(TableName.valueOf(tableName));
-          final HRegionInfo hri = tableRegions.get(0);
+          List<RegionInfo> tableRegions =
+              TEST_UTIL.getAdmin().getRegions(TableName.valueOf(tableName));
+          final RegionInfo hri = tableRegions.get(0);
           TEST_UTIL.getAdmin().move(hri.getEncodedNameAsBytes(),
               Bytes.toBytes(destRS.getServerName().getServerName()));
           // wait for region move completes
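
Throughout this file the patch replaces HRegionInfo.FIRST_META_REGIONINFO with RegionInfoBuilder.FIRST_META_REGIONINFO and iterates over List<RegionInfo> when pruning meta and system-table regions. A stand-alone sketch of that filter, assuming the input list comes from something like ProtobufUtil.getOnlineRegions(...); the helper class and method names are illustrative only.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class UserRegionFilterSketch {
  /** Keeps only user-table regions, dropping system tables and the first meta region. */
  static List<RegionInfo> userRegionsOnly(List<RegionInfo> regions) {
    List<RegionInfo> userRegions = new ArrayList<>();
    for (RegionInfo region : regions) {
      if (region.getTable().isSystemTable()) {
        continue; // hbase:meta, hbase:namespace, etc.
      }
      if (region.getEncodedName().equals(
          RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName())) {
        continue; // constant moved from HRegionInfo to RegionInfoBuilder
      }
      userRegions.add(region);
    }
    return userRegions;
  }
}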

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 1c31acd..c4163d5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -31,7 +31,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -40,6 +39,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
@@ -53,9 +54,9 @@ import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
-import org.junit.rules.TestName;
 
 @Category({MasterTests.class, MediumTests.class})
 public class TestMaster {
@@ -94,7 +95,7 @@ public class TestMaster {
       TEST_UTIL.loadTable(ht, FAMILYNAME, false);
     }
 
-    List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
+    List<Pair<RegionInfo, ServerName>> tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
         m.getConnection(), TABLENAME);
     LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
     assertEquals(1, tableRegions.size());
@@ -119,13 +120,13 @@ public class TestMaster {
     // We have three regions because one is split-in-progress
     assertEquals(3, tableRegions.size());
     LOG.info("Making sure we can call getTableRegionClosest while opening");
-    Pair<HRegionInfo, ServerName> pair =
+    Pair<RegionInfo, ServerName> pair =
         m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
     LOG.info("Result is: " + pair);
-    Pair<HRegionInfo, ServerName> tableRegionFromName =
+    Pair<RegionInfo, ServerName> tableRegionFromName =
         MetaTableAccessor.getRegion(m.getConnection(),
           pair.getFirst().getRegionName());
-    assertEquals(tableRegionFromName.getFirst(), pair.getFirst());
+    assertTrue(RegionInfo.COMPARATOR.compare(tableRegionFromName.getFirst(), pair.getFirst()) == 0);
   }
 
   @Test
@@ -134,7 +135,7 @@ public class TestMaster {
     HMaster m = cluster.getMaster();
     try {
       m.setInitialized(false); // fake it, set back later
-      HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO;
+      RegionInfo meta = RegionInfoBuilder.FIRST_META_REGIONINFO;
       m.move(meta.getEncodedNameAsBytes(), null);
       fail("Region should not be moved since master is not initialized");
     } catch (IOException ioe) {
@@ -153,8 +154,10 @@ public class TestMaster {
 
     admin.createTable(htd, null);
     try {
-      HRegionInfo hri = new HRegionInfo(
-        tableName, Bytes.toBytes("A"), Bytes.toBytes("Z"));
+      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)
+          .setStartKey(Bytes.toBytes("A"))
+          .setEndKey(Bytes.toBytes("Z"))
+          .build();
       admin.move(hri.getEncodedNameAsBytes(), null);
       fail("Region should not be moved since it is fake");
     } catch (IOException ioe) {
@@ -174,7 +177,7 @@ public class TestMaster {
 
     admin.createTable(htd, null);
     try {
-      List<HRegionInfo> tableRegions = admin.getTableRegions(tableName);
+      List<RegionInfo> tableRegions = admin.getRegions(tableName);
 
       master.setInitialized(false); // fake it, set back later
       admin.move(tableRegions.get(0).getEncodedNameAsBytes(), null);
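
The TestMaster changes show the other two halves of the migration: fake regions are now built with RegionInfoBuilder instead of the removed HRegionInfo constructor, and instances are compared with RegionInfo.COMPARATOR rather than equals(). A stand-alone sketch with a hypothetical table name; the explicit setRegionId call is an assumption added here so the two builds compare equal, the patch itself only sets start and end keys.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionInfoBuilderSketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("exampleTable");
    long regionId = 1L; // pinned so both instances carry the same id
    // Builder replaces the removed new HRegionInfo(table, startKey, endKey) constructor.
    RegionInfo a = RegionInfoBuilder.newBuilder(table)
        .setStartKey(Bytes.toBytes("A"))
        .setEndKey(Bytes.toBytes("Z"))
        .setRegionId(regionId)
        .build();
    RegionInfo b = RegionInfoBuilder.newBuilder(table)
        .setStartKey(Bytes.toBytes("A"))
        .setEndKey(Bytes.toBytes("Z"))
        .setRegionId(regionId)
        .build();
    // Equivalent regions compare as 0 under RegionInfo.COMPARATOR.
    System.out.println(RegionInfo.COMPARATOR.compare(a, b) == 0);
  }
}

Admin.getRegions(TableName), as used in the last hunk, is the corresponding replacement for the older Admin.getTableRegions(TableName) call.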