Posted to commits@hbase.apache.org by st...@apache.org on 2015/06/25 23:42:50 UTC

[2/6] hbase git commit: HBASE-13893 Replace HTable with Table in client tests (Jurriaan Mous)

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 6d1859c..bd6d452 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -196,7 +196,7 @@ public class TestHRegion {
   private final int MAX_VERSIONS = 2;
 
   // Test names
-  protected byte[] tableName;
+  protected TableName tableName;
   protected String method;
   protected final byte[] qual1 = Bytes.toBytes("qual1");
   protected final byte[] qual2 = Bytes.toBytes("qual2");
@@ -216,7 +216,7 @@ public class TestHRegion {
     CONF = TEST_UTIL.getConfiguration();
     dir = TEST_UTIL.getDataTestDir("TestHRegion").toString();
     method = name.getMethodName();
-    tableName = Bytes.toBytes(name.getMethodName());
+    tableName = TableName.valueOf(name.getMethodName());
   }
 
   @After
@@ -351,13 +351,13 @@ public class TestHRegion {
    * @param callingMethod a unique component for the path, probably the name of the test method.
    */
   private static WAL createWALCompatibleWithFaultyFileSystem(String callingMethod,
-      Configuration conf, byte[] tableName) throws IOException {
+      Configuration conf, TableName tableName) throws IOException {
     final Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log");
     final Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, logDir);
     return (new WALFactory(walConf,
         Collections.<WALActionsListener>singletonList(new MetricsWAL()), callingMethod))
-        .getWAL(tableName);
+        .getWAL(tableName.toBytes());
   }
 
   /**
@@ -888,7 +888,7 @@ public class TestHRegion {
     final WALFactory wals = new WALFactory(walConf, null, method);
     final WAL wal = wals.getWAL(tableName.getName());
 
-    this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
+    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
       HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
     try {
       Path regiondir = region.getRegionFileSystem().getRegionDir();
@@ -1046,7 +1046,7 @@ public class TestHRegion {
     final WALFactory wals = new WALFactory(walConf, null, method);
     WAL wal = spy(wals.getWAL(tableName.getName()));
 
-    this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
+    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
       HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
     try {
       int i = 0;
@@ -1089,7 +1089,7 @@ public class TestHRegion {
       }
 
       region.close();
-      this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
+      this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
         HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
       region.put(put);
 
@@ -1207,7 +1207,7 @@ public class TestHRegion {
    */
   @Test
   public void testWeirdCacheBehaviour() throws Exception {
-    byte[] TABLE = Bytes.toBytes("testWeirdCacheBehaviour");
+    TableName TABLE = TableName.valueOf("testWeirdCacheBehaviour");
     byte[][] FAMILIES = new byte[][] { Bytes.toBytes("trans-blob"), Bytes.toBytes("trans-type"),
         Bytes.toBytes("trans-date"), Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") };
     this.region = initHRegion(TABLE, getName(), CONF, FAMILIES);
@@ -1250,7 +1250,7 @@ public class TestHRegion {
 
   @Test
   public void testAppendWithReadOnlyTable() throws Exception {
-    byte[] TABLE = Bytes.toBytes("readOnlyTable");
+    TableName TABLE = TableName.valueOf("readOnlyTable");
     this.region = initHRegion(TABLE, getName(), CONF, true, Bytes.toBytes("somefamily"));
     boolean exceptionCaught = false;
     Append append = new Append(Bytes.toBytes("somerow"));
@@ -1270,7 +1270,7 @@ public class TestHRegion {
 
   @Test
   public void testIncrWithReadOnlyTable() throws Exception {
-    byte[] TABLE = Bytes.toBytes("readOnlyTable");
+    TableName TABLE = TableName.valueOf("readOnlyTable");
     this.region = initHRegion(TABLE, getName(), CONF, true, Bytes.toBytes("somefamily"));
     boolean exceptionCaught = false;
     Increment inc = new Increment(Bytes.toBytes("somerow"));
@@ -1362,11 +1362,11 @@ public class TestHRegion {
 
   @Test
   public void testFamilyWithAndWithoutColon() throws Exception {
-    byte[] b = Bytes.toBytes(getName());
+    TableName b = TableName.valueOf(getName());
     byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
     this.region = initHRegion(b, getName(), CONF, cf);
     try {
-      Put p = new Put(b);
+      Put p = new Put(b.toBytes());
       byte[] cfwithcolon = Bytes.toBytes(COLUMN_FAMILY + ":");
       p.add(cfwithcolon, cfwithcolon, cfwithcolon);
       boolean exception = false;
@@ -1387,7 +1387,7 @@ public class TestHRegion {
     byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
     byte[] qual = Bytes.toBytes("qual");
     byte[] val = Bytes.toBytes("val");
-    this.region = initHRegion(Bytes.toBytes(getName()), getName(), CONF, cf);
+    this.region = initHRegion(TableName.valueOf(getName()), getName(), CONF, cf);
     MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
     try {
       long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
@@ -1428,7 +1428,7 @@ public class TestHRegion {
     byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
     byte[] qual = Bytes.toBytes("qual");
     byte[] val = Bytes.toBytes("val");
-    this.region = initHRegion(Bytes.toBytes(getName()), getName(), CONF, cf);
+    this.region = initHRegion(TableName.valueOf(getName()), getName(), CONF, cf);
     MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
     try {
       long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
@@ -1526,7 +1526,7 @@ public class TestHRegion {
 
   @Test
   public void testBatchPutWithTsSlop() throws Exception {
-    byte[] b = Bytes.toBytes(getName());
+    TableName b = TableName.valueOf(getName());
     byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
     byte[] qual = Bytes.toBytes("qual");
     byte[] val = Bytes.toBytes("val");
@@ -4049,7 +4049,7 @@ public class TestHRegion {
     HColumnDescriptor hcd = new HColumnDescriptor(fam1).setMaxVersions(Integer.MAX_VALUE)
         .setBloomFilterType(BloomType.ROWCOL);
 
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
     this.region = TEST_UTIL.createLocalHRegion(info, htd);
@@ -4154,7 +4154,7 @@ public class TestHRegion {
     HColumnDescriptor hcd = new HColumnDescriptor(familyName).setMaxVersions(Integer.MAX_VALUE)
         .setBloomFilterType(BloomType.ROWCOL);
 
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
     this.region = TEST_UTIL.createLocalHRegion(info, htd);
@@ -4204,7 +4204,7 @@ public class TestHRegion {
     try {
       cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
       byte[][] families = { fam1, fam2 };
-      Table ht = htu.createTable(Bytes.toBytes(this.getName()), families);
+      Table ht = htu.createTable(TableName.valueOf(this.getName()), families);
 
       // Setting up region
       byte row[] = Bytes.toBytes("row1");
@@ -4646,7 +4646,7 @@ public class TestHRegion {
     FSUtils.setRootDir(walConf, logDir);
     final WALFactory wals = new WALFactory(walConf, null, UUID.randomUUID().toString());
     final WAL wal = spy(wals.getWAL(tableName.getName()));
-    this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
+    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
         HConstants.EMPTY_END_ROW, method, conf, false, tableDurability, wal,
         new byte[][] { family });
 
@@ -4949,7 +4949,7 @@ public class TestHRegion {
   @Test
   public void testFlushResult() throws IOException {
     String method = name.getMethodName();
-    byte[] tableName = Bytes.toBytes(method);
+    TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
 
     this.region = initHRegion(tableName, method, family);
@@ -4961,7 +4961,7 @@ public class TestHRegion {
 
     // Flush enough files to get up to the threshold, doesn't need compactions
     for (int i = 0; i < 2; i++) {
-      Put put = new Put(tableName).add(family, family, tableName);
+      Put put = new Put(tableName.toBytes()).add(family, family, tableName.toBytes());
       region.put(put);
       fr = region.flush(true);
       assertTrue(fr.isFlushSucceeded());
@@ -4970,7 +4970,7 @@ public class TestHRegion {
 
     // Two flushes after the threshold, compactions are needed
     for (int i = 0; i < 2; i++) {
-      Put put = new Put(tableName).add(family, family, tableName);
+      Put put = new Put(tableName.toBytes()).add(family, family, tableName.toBytes());
       region.put(put);
       fr = region.flush(true);
       assertTrue(fr.isFlushSucceeded());
@@ -5007,20 +5007,6 @@ public class TestHRegion {
    */
   private static HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf,
       byte[]... families) throws IOException {
-    return initHRegion(tableName.getName(), null, null, callingMethod, conf, false, families);
-  }
-
-  /**
-   * @param tableName
-   * @param callingMethod
-   * @param conf
-   * @param families
-   * @throws IOException
-   * @return A region on which you must call
-   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
-   */
-  private static HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
-      byte[]... families) throws IOException {
     return initHRegion(tableName, null, null, callingMethod, conf, false, families);
   }
 
@@ -5034,16 +5020,16 @@ public class TestHRegion {
    * @return A region on which you must call
    *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
    */
-  private static HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
+  private static HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf,
       boolean isReadOnly, byte[]... families) throws IOException {
     return initHRegion(tableName, null, null, callingMethod, conf, isReadOnly, families);
   }
 
-  public static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
+  public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
       String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families)
       throws IOException {
     Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log");
-    HRegionInfo hri = new HRegionInfo(TableName.valueOf(tableName), startKey, stopKey);
+    HRegionInfo hri = new HRegionInfo(tableName, startKey, stopKey);
     final WAL wal = HBaseTestingUtility.createWal(conf, logDir, hri);
     return initHRegion(tableName, startKey, stopKey, callingMethod, conf, isReadOnly,
         Durability.SYNC_WAL, wal, families);
@@ -5061,10 +5047,10 @@ public class TestHRegion {
    * @return A region on which you must call
    *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
    */
-  public static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
+  public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
       WAL wal, byte[]... families) throws IOException {
-    return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, callingMethod, conf,
+    return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey,
         isReadOnly, durability, wal, families);
   }
 
@@ -5647,7 +5633,7 @@ public class TestHRegion {
 
   @Test (timeout=60000)
   public void testSplitRegionWithReverseScan() throws IOException {
-    byte [] tableName = Bytes.toBytes("testSplitRegionWithReverseScan");
+    TableName tableName = TableName.valueOf("testSplitRegionWithReverseScan");
     byte [] qualifier = Bytes.toBytes("qualifier");
     Configuration hc = initSplit();
     int numRows = 3;
@@ -6024,7 +6010,7 @@ public class TestHRegion {
   @Test (timeout=24000)
   public void testRegionTooBusy() throws IOException {
     String method = "testRegionTooBusy";
-    byte[] tableName = Bytes.toBytes(method);
+    TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
     long defaultBusyWaitDuration = CONF.getLong("hbase.busy.wait.duration",
       HRegion.DEFAULT_BUSY_WAIT_DURATION);
@@ -6198,7 +6184,7 @@ public class TestHRegion {
     }
   }
 
-  static HRegion initHRegion(byte[] tableName, String callingMethod,
+  static HRegion initHRegion(TableName tableName, String callingMethod,
       byte[]... families) throws IOException {
     return initHRegion(tableName, callingMethod, HBaseConfiguration.create(),
         families);

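The TestHRegion changes above all follow one recurring pattern: helpers that took the table name as a raw byte[] now take a TableName. A minimal sketch of that conversion idiom, using a hypothetical test name (TableName.valueOf and toBytes are the calls the patch itself uses):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableNameMigrationSketch {
      public static void main(String[] args) {
        // Before the patch: the table name travelled as a raw byte array.
        byte[] legacy = Bytes.toBytes("testFlushResult");

        // After: build a TableName once at setup time...
        TableName tableName = TableName.valueOf("testFlushResult");

        // ...and convert back only where a byte[] is still required, e.g.
        // where testFlushResult reuses the table name as a row key and value.
        byte[] rowKey = tableName.toBytes();
        System.out.println(Bytes.equals(legacy, rowKey)); // true for default-namespace names
      }
    }
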
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
index 9b8dabf..fa152ca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -76,12 +77,17 @@ public class TestHRegionOnCluster {
 
       // Put data: r1->v1
       Log.info("Loading r1 to v1 into " + TABLENAME);
-      HTable table = (HTable) TEST_UTIL.getConnection().getTable(TABLENAME);
+      Table table = TEST_UTIL.getConnection().getTable(TABLENAME);
       putDataAndVerify(table, "r1", FAMILY, "v1", 1);
 
       TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
       // Move region to target server
-      HRegionInfo regionInfo = table.getRegionLocation("r1").getRegionInfo();
+
+      HRegionInfo regionInfo;
+      try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TABLENAME)) {
+        regionInfo = locator.getRegionLocation(Bytes.toBytes("r1")).getRegionInfo();
+      }
+
       int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
       HRegionServer originServer = cluster.getRegionServer(originServerNum);
       int targetServerNum = (originServerNum + 1) % NUM_RS;

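The TestHRegionOnCluster hunk shows the standard replacement for HTable.getRegionLocation(): ask the Connection for a RegionLocator and close it when done. A sketch of the idiom, assuming an already-open Connection (method and variable names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocatorSketch {
      // Find the region holding a given row without casting Table to HTable.
      static HRegionInfo locateRegion(Connection conn, TableName tn, String row)
          throws IOException {
        // RegionLocator is Closeable, so try-with-resources releases it.
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          return locator.getRegionLocation(Bytes.toBytes(row)).getRegionInfo();
        }
      }
    }
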
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index 82689e4..c6aaf67 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -110,7 +110,7 @@ public class TestPerColumnFamilyFlush {
   }
 
   // A helper function to verify edits.
-  void verifyEdit(int familyNum, int putNum, HTable table) throws IOException {
+  void verifyEdit(int familyNum, int putNum, Table table) throws IOException {
     Result r = table.get(createGet(familyNum, putNum));
     byte[] family = FAMILIES[familyNum - 1];
     byte[] qf = Bytes.toBytes("q" + familyNum);
@@ -342,7 +342,7 @@ public class TestPerColumnFamilyFlush {
       TEST_UTIL.startMiniCluster(numRegionServers);
       TEST_UTIL.getHBaseAdmin().createNamespace(
         NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build());
-      HTable table = TEST_UTIL.createTable(TABLENAME, FAMILIES);
+      Table table = TEST_UTIL.createTable(TABLENAME, FAMILIES);
       HTableDescriptor htd = table.getTableDescriptor();
 
       for (byte[] family : FAMILIES) {
@@ -360,7 +360,6 @@ public class TestPerColumnFamilyFlush {
           table.put(createPut(3, i));
         }
       }
-      table.flushCommits();
       Thread.sleep(1000);
 
       Pair<Region, HRegionServer> desiredRegionAndServer = getRegionWithName(TABLENAME);
@@ -466,8 +465,7 @@ public class TestPerColumnFamilyFlush {
     final int numRegionServers = 1;
     TEST_UTIL.startMiniCluster(numRegionServers);
     try {
-      HTable table = null;
-      table = TEST_UTIL.createTable(tableName, FAMILIES);
+      Table table = TEST_UTIL.createTable(tableName, FAMILIES);
       // Force flush the namespace table so edits to it are not hanging around as oldest
       // edits. Otherwise, below, when we make maximum number of WAL files, then it will be
       // the namespace region that is flushed and not the below 'desiredRegion'.
@@ -489,7 +487,6 @@ public class TestPerColumnFamilyFlush {
         for (int j = 0; j < 100; j++) {
           table.put(createPut(1, i * 100 + j));
         }
-        table.flushCommits();
         // Roll the WAL. The log file count is less than maxLogs so no flush is triggered.
         int currentNumRolledLogFiles = getNumRolledLogFiles(desiredRegion);
         assertNull(getWAL(desiredRegion).rollWriter());
@@ -503,7 +500,6 @@ public class TestPerColumnFamilyFlush {
       assertTrue(desiredRegion.getStore(FAMILY2).getMemStoreSize() < cfFlushSizeLowerBound);
       assertTrue(desiredRegion.getStore(FAMILY3).getMemStoreSize() < cfFlushSizeLowerBound);
       table.put(createPut(1, 12345678));
-      table.flushCommits();
       // Make numRolledLogFiles greater than maxLogs
       desiredRegionAndServer.getSecond().walRoller.requestRollAll();
       // Wait for some time till the flush caused by log rolling happens.

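TestPerColumnFamilyFlush can drop its table.flushCommits() calls because Table, unlike a write-buffered HTable, has no client-side write buffer: each put() is sent to the server as it is made. A sketch of the resulting write path, assuming an open Connection (names are illustrative; buffered writes would instead go through a BufferedMutator):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutFlushSketch {
      static void load(Connection conn, TableName tn, byte[] family) throws IOException {
        try (Table table = conn.getTable(tn)) {
          for (int i = 0; i < 100; i++) {
            Put p = new Put(Bytes.toBytes("row-" + i));
            p.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes(i));
            // Sent immediately; there is no Table.flushCommits() to call.
            table.put(p);
          }
        }
      }
    }
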
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
index e924c4c..6dbdfb5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -53,7 +54,7 @@ import org.junit.experimental.categories.Category;
 public class TestRegionFavoredNodes {
 
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static HTable table;
+  private static Table table;
   private static final TableName TABLE_NAME =
       TableName.valueOf("table");
   private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family");

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
index 85a8cd2..9ee0a76 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.TableName;
@@ -68,7 +69,7 @@ public class TestRegionReplicas {
   private static final Log LOG = LogFactory.getLog(TestRegionReplicas.class);
 
   private static final int NB_SERVERS = 1;
-  private static HTable table;
+  private static Table table;
   private static final byte[] row = "TestRegionReplicas".getBytes();
 
   private static HRegionInfo hriPrimary;
@@ -91,7 +92,9 @@ public class TestRegionReplicas {
     // Create table then get the single region for our new table.
     table = HTU.createTable(tableName, f);
 
-    hriPrimary = table.getRegionLocation(row, false).getRegionInfo();
+    try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
+      hriPrimary = locator.getRegionLocation(row, false).getRegionInfo();
+    }
 
     // mock a secondary region info to open
     hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 5498d66..c164363 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -337,7 +337,7 @@ public class TestRegionServerMetrics {
       p.add(cf, qualifier, val);
       puts.add(p);
     }
-    try (HTable t = TEST_UTIL.createTable(tableName, cf)) {
+    try (Table t = TEST_UTIL.createTable(tableName, cf)) {
       t.put(puts);
 
       Scan s = new Scan();
@@ -387,7 +387,7 @@ public class TestRegionServerMetrics {
       p.add(cf, qualifier, val);
       puts.add(p);
     }
-    try (HTable t = TEST_UTIL.createTable(tableName, cf)) {
+    try (Table t = TEST_UTIL.createTable(tableName, cf)) {
       t.put(puts);
 
       Scan s = new Scan();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index d150e01..592bf1e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
@@ -58,7 +60,7 @@ import com.google.protobuf.ServiceException;
 public class TestRegionServerNoMaster {
 
   private static final int NB_SERVERS = 1;
-  private static HTable table;
+  private static Table table;
   private static final byte[] row = "ee".getBytes();
 
   private static HRegionInfo hri;
@@ -78,7 +80,9 @@ public class TestRegionServerNoMaster {
     p.add(HConstants.CATALOG_FAMILY, row, row);
     table.put(p);
 
-    hri = table.getRegionLocation(row, false).getRegionInfo();
+    try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
+      hri = locator.getRegionLocation(row, false).getRegionInfo();
+    }
     regionName = hri.getRegionName();
 
     stopMasterAndAssignMeta(HTU);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java
index 9c6ee1a..9a8a4d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java
@@ -28,7 +28,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -52,14 +55,14 @@ public class TestRegionServerOnlineConfigChange {
   private static HBaseTestingUtility hbaseTestingUtility = new HBaseTestingUtility();
   private static Configuration conf = null;
 
-  private static HTable t1 = null;
+  private static Table t1 = null;
   private static HRegionServer rs1 = null;
   private static byte[] r1name = null;
   private static Region r1 = null;
 
   private final static String table1Str = "table1";
   private final static String columnFamily1Str = "columnFamily1";
-  private final static byte[] TABLE1 = Bytes.toBytes(table1Str);
+  private final static TableName TABLE1 = TableName.valueOf(table1Str);
   private final static byte[] COLUMN_FAMILY1 = Bytes.toBytes(columnFamily1Str);
 
 
@@ -68,12 +71,13 @@ public class TestRegionServerOnlineConfigChange {
     conf = hbaseTestingUtility.getConfiguration();
     hbaseTestingUtility.startMiniCluster(1,1);
     t1 = hbaseTestingUtility.createTable(TABLE1, COLUMN_FAMILY1);
-    @SuppressWarnings("deprecation")
-    HRegionInfo firstHRI = t1.getRegionLocations().keySet().iterator().next();
-    r1name = firstHRI.getRegionName();
-    rs1 = hbaseTestingUtility.getHBaseCluster().getRegionServer(
-        hbaseTestingUtility.getHBaseCluster().getServerWith(r1name));
-    r1 = rs1.getRegion(r1name);
+    try (RegionLocator locator = hbaseTestingUtility.getConnection().getRegionLocator(TABLE1)) {
+      HRegionInfo firstHRI = locator.getAllRegionLocations().get(0).getRegionInfo();
+      r1name = firstHRI.getRegionName();
+      rs1 = hbaseTestingUtility.getHBaseCluster().getRegionServer(
+          hbaseTestingUtility.getHBaseCluster().getServerWith(r1name));
+      r1 = rs1.getRegion(r1name);
+    }
   }
 
   @AfterClass

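TestRegionServerOnlineConfigChange replaces the deprecated HTable.getRegionLocations() map with RegionLocator.getAllRegionLocations(). A sketch of fetching a table's first region that way, again assuming an open Connection (names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class FirstRegionSketch {
      static byte[] firstRegionName(Connection conn, TableName tn) throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          // getAllRegionLocations() returns a List<HRegionLocation>.
          HRegionInfo first = locator.getAllRegionLocations().get(0).getRegionInfo();
          return first.getRegionName();
        }
      }
    }
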
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
index 86515a6..ef7f105 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -83,7 +84,9 @@ public class TestScannerWithBulkload {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
     final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
-    bulkload.doBulkLoad(hfilePath, (HTable) table);
+    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
+      bulkload.doBulkLoad(hfilePath, admin, table, locator);
+    }
     ResultScanner scanner = table.getScanner(scan);
     Result result = scanner.next();
     result = scanAfterBulkLoad(scanner, result, "version2");
@@ -168,8 +171,8 @@ public class TestScannerWithBulkload {
   private Table init(HBaseAdmin admin, long l, Scan scan, TableName tableName) throws Exception {
     Table table = TEST_UTIL.getConnection().getTable(tableName);
     Put put0 = new Put(Bytes.toBytes("row1"));
-    put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes
-        .toBytes("version0")));
+    put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
+        Bytes.toBytes("version0")));
     table.put(put0);
     admin.flush(tableName);
     Put put1 = new Put(Bytes.toBytes("row2"));
@@ -195,9 +198,9 @@ public class TestScannerWithBulkload {
 
   @Test
   public void testBulkLoadWithParallelScan() throws Exception {
-    TableName tableName = TableName.valueOf("testBulkLoadWithParallelScan");
+    final TableName tableName = TableName.valueOf("testBulkLoadWithParallelScan");
       final long l = System.currentTimeMillis();
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
     createTable(admin, tableName);
     Scan scan = createScan();
     final Table table = init(admin, l, scan, tableName);
@@ -217,7 +220,9 @@ public class TestScannerWithBulkload {
           put1.add(new KeyValue(Bytes.toBytes("row5"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
               Bytes.toBytes("version0")));
           table.put(put1);
-          bulkload.doBulkLoad(hfilePath, (HTable) table);
+          try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
+            bulkload.doBulkLoad(hfilePath, admin, table, locator);
+          }
           latch.countDown();
         } catch (TableNotFoundException e) {
         } catch (IOException e) {
@@ -231,7 +236,6 @@ public class TestScannerWithBulkload {
     scanAfterBulkLoad(scanner, result, "version1");
     scanner.close();
     table.close();
-
   }
 
   @Test
@@ -248,7 +252,9 @@ public class TestScannerWithBulkload {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
     final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
-    bulkload.doBulkLoad(hfilePath, (HTable) table);
+    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
+      bulkload.doBulkLoad(hfilePath, admin, table, locator);
+    }
     ResultScanner scanner = table.getScanner(scan);
     Result result = scanner.next();
     // We had 'version0', 'version1' for 'row1,col:q' in the table.

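TestScannerWithBulkload switches from the doBulkLoad(Path, HTable) overload, which forced an (HTable) cast, to the variant taking the Admin, Table, and RegionLocator explicitly. A sketch under the same assumptions (an open Connection and a directory of prepared HFiles; names are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

    public class BulkLoadSketch {
      static void bulkLoad(Connection conn, Configuration conf, TableName tn, Path hfileDir)
          throws Exception {
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        try (Admin admin = conn.getAdmin();
            Table table = conn.getTable(tn);
            RegionLocator locator = conn.getRegionLocator(tn)) {
          // Four-argument overload; no cast to the concrete HTable needed.
          loader.doBulkLoad(hfileDir, admin, table, locator);
        }
      }
    }

The same pattern appears again in the TestAccessController hunk further down.
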
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java
index 5ce4456..6578d74 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java
@@ -156,18 +156,18 @@ public class TestServerCustomProtocol {
   @Before
   public void before()  throws Exception {
     final byte[][] SPLIT_KEYS = new byte[][] { ROW_B, ROW_C };
-    HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
+    Table table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
 
     Put puta = new Put( ROW_A );
-    puta.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
+    puta.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
     table.put(puta);
 
     Put putb = new Put( ROW_B );
-    putb.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
+    putb.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
     table.put(putb);
 
     Put putc = new Put( ROW_C );
-    putc.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
+    putc.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
     table.put(putc);
   }
 
@@ -306,8 +306,8 @@ public class TestServerCustomProtocol {
 
   @Test
   public void testSingleMethod() throws Throwable {
-    try (HTable table = (HTable) util.getConnection().getTable(TEST_TABLE)) {
-      RegionLocator locator = table.getRegionLocator();
+    try (Table table = util.getConnection().getTable(TEST_TABLE);
+        RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) {
       Map<byte [], String> results = table.coprocessorService(PingProtos.PingService.class,
         null, ROW_A,
         new Batch.Call<PingProtos.PingService, String>() {
@@ -335,10 +335,11 @@ public class TestServerCustomProtocol {
 
   @Test
   public void testRowRange() throws Throwable {
-    try (HTable table = (HTable) util.getConnection().getTable(TEST_TABLE)) {
-      RegionLocator locator = table.getRegionLocator();
-      for (Entry<HRegionInfo, ServerName> e: table.getRegionLocations().entrySet()) {
-        LOG.info("Region " + e.getKey().getRegionNameAsString() + ", servername=" + e.getValue());
+    try (Table table = util.getConnection().getTable(TEST_TABLE);
+        RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) {
+      for (HRegionLocation e: locator.getAllRegionLocations()) {
+        LOG.info("Region " + e.getRegionInfo().getRegionNameAsString()
+            + ", servername=" + e.getServerName());
       }
       // Here are what regions looked like on a run:
       //
@@ -355,7 +356,7 @@ public class TestServerCustomProtocol {
       results = ping(table, ROW_BC, null);
       assertEquals(2, results.size());
       // should contain last 2 regions
-      HRegionLocation loc = table.getRegionLocation(ROW_A, true);
+      HRegionLocation loc = locator.getRegionLocation(ROW_A, true);
       assertNull("Should be missing region for row aaa (prior to start row)",
         results.get(loc.getRegionInfo().getRegionName()));
       verifyRegionResults(locator, results, ROW_B);
@@ -367,7 +368,7 @@ public class TestServerCustomProtocol {
       assertEquals(2, results.size());
       verifyRegionResults(locator, results, ROW_A);
       verifyRegionResults(locator, results, ROW_B);
-      loc = table.getRegionLocation(ROW_C, true);
+      loc = locator.getRegionLocation(ROW_C, true);
       assertNull("Should be missing region for row ccc (past stop row)",
           results.get(loc.getRegionInfo().getRegionName()));
   
@@ -377,7 +378,7 @@ public class TestServerCustomProtocol {
       assertEquals(2, results.size());
       verifyRegionResults(locator, results, ROW_A);
       verifyRegionResults(locator, results, ROW_B);
-      loc = table.getRegionLocation(ROW_C, true);
+      loc = locator.getRegionLocation(ROW_C, true);
       assertNull("Should be missing region for row ccc (past stop row)",
           results.get(loc.getRegionInfo().getRegionName()));
   
@@ -386,10 +387,10 @@ public class TestServerCustomProtocol {
       // should only contain region bbb
       assertEquals(1, results.size());
       verifyRegionResults(locator, results, ROW_B);
-      loc = table.getRegionLocation(ROW_A, true);
+      loc = locator.getRegionLocation(ROW_A, true);
       assertNull("Should be missing region for row aaa (prior to start)",
           results.get(loc.getRegionInfo().getRegionName()));
-      loc = table.getRegionLocation(ROW_C, true);
+      loc = locator.getRegionLocation(ROW_C, true);
       assertNull("Should be missing region for row ccc (past stop row)",
           results.get(loc.getRegionInfo().getRegionName()));
     }
@@ -415,8 +416,8 @@ public class TestServerCustomProtocol {
 
   @Test
   public void testCompoundCall() throws Throwable {
-    try (HTable table = (HTable) util.getConnection().getTable(TEST_TABLE)) {
-      RegionLocator locator = table.getRegionLocator();
+    try (Table table = util.getConnection().getTable(TEST_TABLE);
+        RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) {
       Map<byte [], String> results = compoundOfHelloAndPing(table, ROW_A, ROW_C);
       verifyRegionResults(locator, results, "Hello, pong", ROW_A);
       verifyRegionResults(locator, results, "Hello, pong", ROW_B);
@@ -426,8 +427,8 @@ public class TestServerCustomProtocol {
 
   @Test
   public void testNullCall() throws Throwable {
-    try(HTable table = (HTable) util.getConnection().getTable(TEST_TABLE)) {
-      RegionLocator locator = table.getRegionLocator();
+    try (Table table = util.getConnection().getTable(TEST_TABLE);
+        RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) {
       Map<byte[],String> results = hello(table, null, ROW_A, ROW_C);
       verifyRegionResults(locator, results, "Who are you?", ROW_A);
       verifyRegionResults(locator, results, "Who are you?", ROW_B);
@@ -437,8 +438,8 @@ public class TestServerCustomProtocol {
 
   @Test
   public void testNullReturn() throws Throwable {
-    try (HTable table = (HTable) util.getConnection().getTable(TEST_TABLE)) {
-      RegionLocator locator = table.getRegionLocator();
+    try (Table table = util.getConnection().getTable(TEST_TABLE);
+        RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) {
       Map<byte[],String> results = hello(table, "nobody", ROW_A, ROW_C);
       verifyRegionResults(locator, results, null, ROW_A);
       verifyRegionResults(locator, results, null, ROW_B);
@@ -448,7 +449,7 @@ public class TestServerCustomProtocol {
 
   @Test
   public void testEmptyReturnType() throws Throwable {
-    try (HTable table = (HTable) util.getConnection().getTable(TEST_TABLE)) {
+    try (Table table = util.getConnection().getTable(TEST_TABLE)) {
       Map<byte[],String> results = noop(table, ROW_A, ROW_C);
       assertEquals("Should have results from three regions", 3, results.size());
       // all results should be null

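Besides the Table/RegionLocator moves, TestServerCustomProtocol migrates Put.add(family, qualifier, value) to Put.addColumn, which takes the same arguments. A minimal sketch (row and values are hypothetical):

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAddColumnSketch {
      public static void main(String[] args) {
        byte[] family = Bytes.toBytes("f");
        Put put = new Put(Bytes.toBytes("row-a"));
        // Deprecated spelling removed by the patch:
        //   put.add(family, Bytes.toBytes("col1"), Bytes.toBytes(1));
        // Replacement with identical (family, qualifier, value) arguments:
        put.addColumn(family, Bytes.toBytes("col1"), Bytes.toBytes(1));
      }
    }
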
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 86d196e..2fe5654 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -298,7 +298,7 @@ public class TestSplitTransactionOnCluster {
         TableName.valueOf("testRSSplitDaughtersAreOnlinedAfterShutdownHandling");
 
     // Create table then get the single region for our new table.
-    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
+    Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -343,7 +343,7 @@ public class TestSplitTransactionOnCluster {
         TableName.valueOf("testExistingZnodeBlocksSplitAndWeRollback");
 
     // Create table then get the single region for our new table.
-    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
+    Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -401,7 +401,7 @@ public class TestSplitTransactionOnCluster {
         TableName.valueOf("testShutdownFixupWhenDaughterHasSplit");
 
     // Create table then get the single region for our new table.
-    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
+    Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -567,7 +567,7 @@ public class TestSplitTransactionOnCluster {
         .valueOf("testMasterRestartAtRegionSplitPendingCatalogJanitor");
 
     // Create table then get the single region for our new table.
-    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
+    Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -683,8 +683,7 @@ public class TestSplitTransactionOnCluster {
     htd.setRegionReplication(2);
     htd.addCoprocessor(SlowMeCopro.class.getName());
     // Create table then get the single region for our new table.
-    Table t = TESTING_UTIL.createTable(htd, new byte[][]{Bytes.toBytes("cf")},
-        TESTING_UTIL.getConfiguration());
+    Table t = TESTING_UTIL.createTable(htd, new byte[][]{Bytes.toBytes("cf")}, null);
     List<HRegion> oldRegions;
     do {
       oldRegions = cluster.getRegions(tableName);
@@ -965,7 +964,7 @@ public class TestSplitTransactionOnCluster {
       desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
       admin.createTable(desc);
       Connection connection = ConnectionFactory.createConnection(cluster.getConfiguration());
-      HTable hTable = (HTable) connection.getTable(desc.getTableName());
+      Table hTable = connection.getTable(desc.getTableName());
       for(int i = 1; i < 5; i++) {
         Put p1 = new Put(("r"+i).getBytes());
         p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
@@ -1266,9 +1265,9 @@ public class TestSplitTransactionOnCluster {
     return regions;
   }
 
-  private HTable createTableAndWait(TableName tableName, byte[] cf) throws IOException,
+  private Table createTableAndWait(TableName tableName, byte[] cf) throws IOException,
       InterruptedException {
-    HTable t = TESTING_UTIL.createTable(tableName, cf);
+    Table t = TESTING_UTIL.createTable(tableName, cf);
     awaitTableRegions(tableName);
     assertTrue("Table not online: " + tableName,
       cluster.getRegions(tableName).size() != 0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java
index b2f2898..93c8ebc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java
@@ -87,7 +87,7 @@ public class TestCompactionWithThroughputController {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
     }
-    HTable table = TEST_UTIL.createTable(tableName, family);
+    Table table = TEST_UTIL.createTable(tableName, family);
     Random rand = new Random();
     for (int i = 0; i < 10; i++) {
       for (int j = 0; j < 10; j++) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
index 6a6cf21..de93305 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
@@ -55,7 +55,7 @@ public class TestReplicationKillRS extends TestReplicationBase {
     Thread killer = killARegionServer(util, 5000, rsToKill1);
 
     LOG.info("Start loading table");
-    int initialCount = utility1.loadTable((HTable)htable1, famName);
+    int initialCount = utility1.loadTable(htable1, famName);
     LOG.info("Done loading table");
     killer.join(5000);
     LOG.info("Done waiting for threads");

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
index 930ffba..fe21070 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
@@ -40,7 +40,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.BaseWALObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -86,7 +88,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
   private static final int NB_SERVERS = 2;
   private static TableName tableName = TableName.valueOf(
     TestRegionReplicaReplicationEndpointNoMaster.class.getSimpleName());
-  private static HTable table;
+  private static Table table;
   private static final byte[] row = "TestRegionReplicaReplicator".getBytes();
 
   private static HRegionServer rs0;
@@ -117,10 +119,12 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
     HTU.startMiniCluster(NB_SERVERS);
 
     // Create table then get the single region for our new table.
-    HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
-    table = HTU.createTable(htd, new byte[][]{f}, HTU.getConfiguration());
+    HTableDescriptor htd = HTU.createTableDescriptor(tableName.getNameAsString());
+    table = HTU.createTable(htd, new byte[][]{f}, null);
 
-    hriPrimary = table.getRegionLocation(row, false).getRegionInfo();
+    try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
+      hriPrimary = locator.getRegionLocation(row, false).getRegionInfo();
+    }
 
     // mock a secondary region info to open
     hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 77f9bc7..6d7a2ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -955,10 +955,12 @@ public class TestAccessController extends SecureTestUtil {
 
 
       try (Connection conn = ConnectionFactory.createConnection(conf);
-           HTable table = (HTable)conn.getTable(tableName)) {
+           Admin admin = conn.getAdmin();
+           RegionLocator locator = conn.getRegionLocator(tableName);
+           Table table = conn.getTable(tableName)) {
         TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
         LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
-        loader.doBulkLoad(loadPath, table);
+        loader.doBulkLoad(loadPath, admin, table, locator);
       }
     }
 
@@ -1964,7 +1966,7 @@ public class TestAccessController extends SecureTestUtil {
     HRegionLocation location = regions.get(0);
     final HRegionInfo hri = location.getRegionInfo();
     final ServerName server = location.getServerName();
-    try (HTable table = (HTable) systemUserConnection.getTable(TEST_TABLE2)) {
+    try (Table table = systemUserConnection.getTable(TEST_TABLE2)) {
       AccessTestAction moveAction = new AccessTestAction() {
         @Override
         public Object run() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 0ff87af..a285eae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -144,7 +145,7 @@ public class TestHBaseFsck {
   private static Admin admin;
 
   // for the instance, reset every test run
-  private HTable tbl;
+  private Table tbl;
   private final static byte[][] SPLITS = new byte[][] { Bytes.toBytes("A"),
     Bytes.toBytes("B"), Bytes.toBytes("C") };
   // one row per region.
@@ -155,7 +156,7 @@ public class TestHBaseFsck {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
-      MasterSyncObserver.class.getName());
+        MasterSyncObserver.class.getName());
 
     conf.setInt("hbase.regionserver.handler.count", 2);
     conf.setInt("hbase.regionserver.metahandler.count", 30);
@@ -326,7 +327,8 @@ public class TestHBaseFsck {
   private void deleteRegion(Configuration conf, final HTableDescriptor htd,
       byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow,
       boolean hdfs) throws IOException, InterruptedException {
-    deleteRegion(conf, htd, startKey, endKey, unassign, metaRow, hdfs, false, HRegionInfo.DEFAULT_REPLICA_ID);
+    deleteRegion(conf, htd, startKey, endKey, unassign, metaRow, hdfs, false,
+        HRegionInfo.DEFAULT_REPLICA_ID);
   }
 
   /**
@@ -344,7 +346,11 @@ public class TestHBaseFsck {
     LOG.info("** Before delete:");
     dumpMeta(htd.getTableName());
 
-    List<HRegionLocation> locations = tbl.getAllRegionLocations();
+    List<HRegionLocation> locations;
+    try(RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
+      locations = rl.getAllRegionLocations();
+    }
+
     for (HRegionLocation location : locations) {
       HRegionInfo hri = location.getRegionInfo();
       ServerName hsa = location.getServerName();
@@ -426,15 +432,14 @@ public class TestHBaseFsck {
     desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
     createTable(TEST_UTIL, desc, SPLITS);
 
-    tbl = (HTable) connection.getTable(tablename, tableExecutorService);
+    tbl = connection.getTable(tablename, tableExecutorService);
     List<Put> puts = new ArrayList<Put>();
     for (byte[] row : ROWKEYS) {
       Put p = new Put(row);
-      p.add(FAM, Bytes.toBytes("val"), row);
+      p.addColumn(FAM, Bytes.toBytes("val"), row);
       puts.add(p);
     }
     tbl.put(puts);
-    tbl.flushCommits();
   }
 
   /**
@@ -683,13 +688,12 @@ public class TestHBaseFsck {
       TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS,
-            ERROR_CODE.DUPE_STARTKEYS});
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS });
       assertEquals(2, hbck.getOverlapGroups(table).size());
       assertEquals(ROWKEYS.length, countRows()); // seems like the "bigger" region won.
 
       // fix the degenerate region.
-      doFsck(conf,true);
+      doFsck(conf, true);
 
       // check that the degenerate region is gone and no data loss
       HBaseFsck hbck2 = doFsck(conf,false);
@@ -727,16 +731,16 @@ public class TestHBaseFsck {
       admin.flush(table);
       assertNoErrors(doFsck(conf, false));
       assertEquals(ROWKEYS.length, countRows());
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
-          Bytes.toBytes("C"), true, false, false, false, 1); // unassign one replica
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true,
+          false, false, false, 1); // unassign one replica
       // check that problem exists
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.NOT_DEPLOYED});
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_DEPLOYED });
       // fix the problem
       hbck = doFsck(conf, true);
       // run hbck again to make sure we don't see any errors
       hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[]{});
+      assertErrors(hbck, new ERROR_CODE[] {});
     } finally {
       cleanupTable(table);
     }
@@ -868,13 +872,12 @@ public class TestHBaseFsck {
 
       // TODO why is dupe region different from dupe start keys?
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS,
-            ERROR_CODE.DUPE_STARTKEYS});
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS });
       assertEquals(2, hbck.getOverlapGroups(table).size());
       assertEquals(ROWKEYS.length, countRows()); // seems like the "bigger" region won.
 
       // fix the degenerate region.
-      doFsck(conf,true);
+      doFsck(conf, true);
 
       // check that the degenerate region is gone and no data loss
       HBaseFsck hbck2 = doFsck(conf,false);
@@ -894,7 +897,7 @@ public class TestHBaseFsck {
     TableName table = TableName.valueOf("tableDegenerateRegions");
     try {
       setupTable(table);
-      assertNoErrors(doFsck(conf,false));
+      assertNoErrors(doFsck(conf, false));
       assertEquals(ROWKEYS.length, countRows());
 
       // Now let's mess it up, by adding a region with a duplicate startkey
@@ -913,7 +916,7 @@ public class TestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());
 
       // fix the degenerate region.
-      doFsck(conf,true);
+      doFsck(conf, true);
 
       // check that the degenerate region is gone and no data loss
       HBaseFsck hbck2 = doFsck(conf,false);
@@ -947,8 +950,7 @@ public class TestHBaseFsck {
       TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {
-          ERROR_CODE.OVERLAP_IN_REGION_CHAIN });
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.OVERLAP_IN_REGION_CHAIN });
       assertEquals(2, hbck.getOverlapGroups(table).size());
       assertEquals(ROWKEYS.length, countRows());
 
@@ -1073,8 +1075,8 @@ public class TestHBaseFsck {
 
       // Mess it up by creating an overlap in the metadata
       admin.disableTable(table);
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"),
-          Bytes.toBytes("B"), true, true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true,
+          true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
       admin.enableTable(table);
 
       HRegionInfo hriOverlap =
@@ -1086,9 +1088,9 @@ public class TestHBaseFsck {
       TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {
-          ERROR_CODE.ORPHAN_HDFS_REGION, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
-          ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(hbck,
+          new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+              ERROR_CODE.HOLE_IN_REGION_CHAIN });
 
       // fix the problem.
       doFsck(conf, true);
@@ -1158,13 +1160,12 @@ public class TestHBaseFsck {
 
       // Mess it up by leaving a hole in the assignment, meta, and hdfs data
       admin.disableTable(table);
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
-          Bytes.toBytes("C"), true, true, true);
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true,
+          true, true);
       admin.enableTable(table);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {
-          ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN });
       // holes are separate from overlap groups
       assertEquals(0, hbck.getOverlapGroups(table).size());
 
@@ -1173,7 +1174,7 @@ public class TestHBaseFsck {
 
       // check that hole fixed
       assertNoErrors(doFsck(conf,false));
-      assertEquals(ROWKEYS.length - 2 , countRows()); // lost a region so lost a row
+      assertEquals(ROWKEYS.length - 2, countRows()); // lost a region so lost a row
     } finally {
       cleanupTable(table);
     }
@@ -1192,15 +1193,14 @@ public class TestHBaseFsck {
 
       // Mess it up by leaving a hole in the meta data
       admin.disableTable(table);
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
-          Bytes.toBytes("C"), true, true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true,
+          true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
       admin.enableTable(table);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {
-          ERROR_CODE.ORPHAN_HDFS_REGION,
-          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
-          ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(hbck,
+          new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+              ERROR_CODE.HOLE_IN_REGION_CHAIN });
       // holes are separate from overlap groups
       assertEquals(0, hbck.getOverlapGroups(table).size());
 
@@ -1229,22 +1229,22 @@ public class TestHBaseFsck {
 
       // Mess it up by leaving a hole in the meta data
       admin.disableTable(table);
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
-          Bytes.toBytes("C"), true, true, false); // don't rm from fs
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true,
+          true, false); // don't rm from fs
       admin.enableTable(table);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {
-          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(hbck,
+          new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN });
       // holes are separate from overlap groups
       assertEquals(0, hbck.getOverlapGroups(table).size());
 
       // fix hole
-      assertErrors(doFsck(conf, true) , new ERROR_CODE[] {
-          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(doFsck(conf, true),
+          new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN });
 
       // check that hole fixed
-      assertNoErrors(doFsck(conf,false));
+      assertNoErrors(doFsck(conf, false));
       assertEquals(ROWKEYS.length, countRows());
     } finally {
       cleanupTable(table);
@@ -1260,7 +1260,7 @@ public class TestHBaseFsck {
       desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
       createTable(TEST_UTIL, desc, null);
 
-      tbl = (HTable) connection.getTable(desc.getTableName());
+      tbl = connection.getTable(desc.getTableName());
       for (int i = 0; i < 5; i++) {
         Put p1 = new Put(("r" + i).getBytes());
         p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
@@ -1293,9 +1293,9 @@ public class TestHBaseFsck {
 
       // fix hole
       assertErrors(
-        doFsck(conf, false, true, false, false, false, false, false, false, false, false, null),
-        new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
-          ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
+          doFsck(conf, false, true, false, false, false, false, false, false, false, false, null),
+          new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+              ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
 
       // check that hole fixed
       assertNoErrors(doFsck(conf, false));
@@ -1322,22 +1322,22 @@ public class TestHBaseFsck {
 
       // Mess it up by leaving a hole in the meta data
       admin.disableTable(table);
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
-          Bytes.toBytes("C"), false, true, false); // don't rm from fs
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false,
+          true, false); // don't rm from fs
       admin.enableTable(table);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {
-          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(hbck,
+          new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN });
       // holes are separate from overlap groups
       assertEquals(0, hbck.getOverlapGroups(table).size());
 
       // fix hole
-      assertErrors(doFsck(conf, true) , new ERROR_CODE[] {
-          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(doFsck(conf, true),
+          new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN });
 
       // check that hole fixed
-      assertNoErrors(doFsck(conf,false));
+      assertNoErrors(doFsck(conf, false));
       assertEquals(ROWKEYS.length, countRows());
     } finally {
       cleanupTable(table);
@@ -1360,8 +1360,8 @@ public class TestHBaseFsck {
       admin.flush(table);
 
       // Mess it up by leaving a hole in the hdfs data
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
-          Bytes.toBytes("C"), false, false, true); // don't rm meta
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false,
+          false, true); // don't rm meta
 
       HBaseFsck hbck = doFsck(conf, false);
       assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS});
@@ -1407,17 +1407,17 @@ public class TestHBaseFsck {
       admin.flush(table);
 
       // Mess it up by leaving a hole in the hdfs data
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
-          Bytes.toBytes("C"), false, false, true); // don't rm meta
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false,
+          false, true); // don't rm meta
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS});
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_HDFS });
 
       // fix hole
       doFsck(conf, true);
 
       // check that hole fixed
-      assertNoErrors(doFsck(conf,false));
+      assertNoErrors(doFsck(conf, false));
       assertEquals(ROWKEYS.length - 2, countRows());
 
       // the following code checks whether the old primary/secondary has
@@ -1633,20 +1633,18 @@ public class TestHBaseFsck {
       // make sure data in regions, if in wal only there is no data loss
       admin.flush(table2);
       // Mess them up by leaving a hole in the hdfs data
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
-        Bytes.toBytes("C"), false, false, true); // don't rm meta
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false,
+          false, true); // don't rm meta
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {
-        ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS});
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS });
 
       // fix hole in table 1
       doFsck(conf, true, table1);
       // check that hole in table 1 fixed
       assertNoErrors(doFsck(conf, false, table1));
       // check that hole in table 2 still there
-      assertErrors(doFsck(conf, false, table2),
-        new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS});
+      assertErrors(doFsck(conf, false, table2), new ERROR_CODE[] { ERROR_CODE.NOT_IN_HDFS });
 
       // fix hole in table 2
       doFsck(conf, true, table2);
@@ -1672,7 +1670,11 @@ public class TestHBaseFsck {
 
       // make sure data in regions, if in wal only there is no data loss
       admin.flush(table);
-      HRegionLocation location = tbl.getRegionLocation("B");
+
+      HRegionLocation location;
+      try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
+        location = rl.getRegionLocation(Bytes.toBytes("B"));
+      }
 
       // Delete one region from meta, but not hdfs, unassign it.
       deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
@@ -1752,33 +1754,38 @@ public class TestHBaseFsck {
 
       // make sure data in regions, if in wal only there is no data loss
       admin.flush(table);
-      HRegionLocation location = tbl.getRegionLocation(Bytes.toBytes("B"));
 
-      meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
-      HRegionInfo hri = location.getRegionInfo();
+      try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
+        HRegionLocation location = rl.getRegionLocation(Bytes.toBytes("B"));
 
-      // do a regular split
-      byte[] regionName = location.getRegionInfo().getRegionName();
-      admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
-      TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);
+        meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
+        HRegionInfo hri = location.getRegionInfo();
 
-      // TODO: fixHdfsHoles does not work against splits, since the parent dir lingers on
-      // for some time until children references are deleted. HBCK erroneously sees this as
-      // overlapping regions
-      HBaseFsck hbck = doFsck(conf, true, true, false, false, false, true, true, true, false, false, null);
-      assertErrors(hbck, new ERROR_CODE[] {}); //no LINGERING_SPLIT_PARENT reported
+        // do a regular split
+        byte[] regionName = location.getRegionInfo().getRegionName();
+        admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
+        TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);
 
-      // assert that the split hbase:meta entry is still there.
-      Get get = new Get(hri.getRegionName());
-      Result result = meta.get(get);
-      assertNotNull(result);
-      assertNotNull(MetaTableAccessor.getHRegionInfo(result));
+        // TODO: fixHdfsHoles does not work against splits, since the parent dir lingers on
+        // for some time until children references are deleted. HBCK erroneously sees this as
+        // overlapping regions
+        HBaseFsck hbck = doFsck(conf, true, true, false, false, false, true, true, true, false,
+            false, null);
+        assertErrors(hbck, new ERROR_CODE[] {}); //no LINGERING_SPLIT_PARENT reported
 
-      assertEquals(ROWKEYS.length, countRows());
+        // assert that the split hbase:meta entry is still there.
+        Get get = new Get(hri.getRegionName());
+        Result result = meta.get(get);
+        assertNotNull(result);
+        assertNotNull(MetaTableAccessor.getHRegionInfo(result));
 
-      // assert that we still have the split regions
-      assertEquals(tbl.getStartKeys().length, SPLITS.length + 1 + 1); //SPLITS + 1 is # regions pre-split.
-      assertNoErrors(doFsck(conf, false));
+        assertEquals(ROWKEYS.length, countRows());
+
+        // assert that we still have the split regions
+        // SPLITS + 1 is # regions pre-split.
+        assertEquals(rl.getStartKeys().length, SPLITS.length + 1 + 1);
+        assertNoErrors(doFsck(conf, false));
+      }
     } finally {
       cleanupTable(table);
       IOUtils.closeQuietly(meta);
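[Editorial sketch] The split test above leans on two more RegionLocator methods, getRegionLocation(byte[]) for single-row lookups and getStartKeys() for counting regions, both formerly on HTable. A small sketch under the same assumptions as before (illustrative table name and wiring):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleRegionLookupExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator rl = connection.getRegionLocator(TableName.valueOf("exampleTable"))) {
      // getRegionLocation takes a row key, replacing HTable#getRegionLocation
      HRegionLocation location = rl.getRegionLocation(Bytes.toBytes("B"));
      System.out.println("row B lives in " + location.getRegionInfo().getEncodedName());
      // getStartKeys() replaces HTable#getStartKeys for counting regions
      System.out.println("region count: " + rl.getStartKeys().length);
    }
  }
}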
@@ -1799,56 +1806,68 @@ public class TestHBaseFsck {
 
       // make sure data in regions, if in wal only there is no data loss
       admin.flush(table);
-      HRegionLocation location = tbl.getRegionLocation(Bytes.toBytes("B"));
-
-      HRegionInfo hri = location.getRegionInfo();
-
-      // do a regular split
-      byte[] regionName = location.getRegionInfo().getRegionName();
-      admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
-      TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);
-
-      PairOfSameType<HRegionInfo> daughters =
-          MetaTableAccessor.getDaughterRegions(meta.get(new Get(regionName)));
-
-      // Delete daughter regions from meta, but not hdfs, unassign it.
-      Map<HRegionInfo, ServerName> hris = tbl.getRegionLocations();
-      undeployRegion(connection, hris.get(daughters.getFirst()), daughters.getFirst());
-      undeployRegion(connection, hris.get(daughters.getSecond()), daughters.getSecond());
-
-      List<Delete> deletes = new ArrayList<>();
-      deletes.add(new Delete(daughters.getFirst().getRegionName()));
-      deletes.add(new Delete(daughters.getSecond().getRegionName()));
-      meta.delete(deletes);
-
-      // Remove daughters from regionStates
-      RegionStates regionStates = TEST_UTIL.getMiniHBaseCluster().getMaster().
-          getAssignmentManager().getRegionStates();
-      regionStates.deleteRegion(daughters.getFirst());
-      regionStates.deleteRegion(daughters.getSecond());
-
-      HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck,
-          new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
-              ERROR_CODE.HOLE_IN_REGION_CHAIN }); //no LINGERING_SPLIT_PARENT
-
-      // now fix it. The fix should not revert the region split, but add daughters to META
-      hbck = doFsck(conf, true, true, false, false, false, false, false, false, false, false, null);
-      assertErrors(hbck,
-          new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
-              ERROR_CODE.HOLE_IN_REGION_CHAIN });
-
-      // assert that the split hbase:meta entry is still there.
-      Get get = new Get(hri.getRegionName());
-      Result result = meta.get(get);
-      assertNotNull(result);
-      assertNotNull(MetaTableAccessor.getHRegionInfo(result));
-
-      assertEquals(ROWKEYS.length, countRows());
 
-      // assert that we still have the split regions
-      assertEquals(tbl.getStartKeys().length, SPLITS.length + 1 + 1); //SPLITS + 1 is # regions pre-split.
-      assertNoErrors(doFsck(conf, false)); //should be fixed by now
+      try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
+        HRegionLocation location = rl.getRegionLocation(Bytes.toBytes("B"));
+
+        HRegionInfo hri = location.getRegionInfo();
+
+        // do a regular split
+        byte[] regionName = location.getRegionInfo().getRegionName();
+        admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
+        TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);
+
+        PairOfSameType<HRegionInfo> daughters = MetaTableAccessor.getDaughterRegions(
+            meta.get(new Get(regionName)));
+
+        // Delete daughter regions from meta, but not hdfs, and unassign them.
+
+        ServerName firstSN =
+            rl.getRegionLocation(daughters.getFirst().getStartKey()).getServerName();
+        ServerName secondSN =
+            rl.getRegionLocation(daughters.getSecond().getStartKey()).getServerName();
+
+        undeployRegion(connection, firstSN, daughters.getFirst());
+        undeployRegion(connection, secondSN, daughters.getSecond());
+
+        List<Delete> deletes = new ArrayList<>();
+        deletes.add(new Delete(daughters.getFirst().getRegionName()));
+        deletes.add(new Delete(daughters.getSecond().getRegionName()));
+        meta.delete(deletes);
+
+        // Remove daughters from regionStates
+        RegionStates regionStates = TEST_UTIL.getMiniHBaseCluster().getMaster().
+            getAssignmentManager().getRegionStates();
+        regionStates.deleteRegion(daughters.getFirst());
+        regionStates.deleteRegion(daughters.getSecond());
+
+        HBaseFsck hbck = doFsck(conf, false);
+        assertErrors(hbck, new ERROR_CODE[] {
+            ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+            ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+            ERROR_CODE.HOLE_IN_REGION_CHAIN }); //no LINGERING_SPLIT_PARENT
+
+        // now fix it. The fix should not revert the region split, but add daughters to META
+        hbck = doFsck(conf, true, true, false, false, false, false, false, false, false,
+            false, null);
+        assertErrors(hbck, new ERROR_CODE[] {
+            ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+            ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+            ERROR_CODE.HOLE_IN_REGION_CHAIN });
+
+        // assert that the split hbase:meta entry is still there.
+        Get get = new Get(hri.getRegionName());
+        Result result = meta.get(get);
+        assertNotNull(result);
+        assertNotNull(MetaTableAccessor.getHRegionInfo(result));
+
+        assertEquals(ROWKEYS.length, countRows());
+
+        // assert that we still have the split regions
+        // SPLITS + 1 is # regions pre-split.
+        assertEquals(rl.getStartKeys().length, SPLITS.length + 1 + 1);
+        assertNoErrors(doFsck(conf, false)); //should be fixed by now
+      }
     } finally {
       meta.close();
       cleanupTable(table);
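[Editorial sketch] Where the old code read a whole HTable#getRegionLocations() map just to find each daughter's server, the rewrite asks the locator for the region holding the daughter's start key and takes its ServerName. A compilable sketch of that helper idiom; the method name serverFor is my own, and note that the locator may serve a cached location, in which case RegionLocator#getRegionLocation(row, true) can force a reload.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionServerLookupExample {
  // Resolve where a known region is deployed by looking up its start key,
  // the same idiom the hunk above uses for the split daughters.
  static ServerName serverFor(Connection connection, HRegionInfo hri) throws IOException {
    try (RegionLocator rl = connection.getRegionLocator(hri.getTable())) {
      return rl.getRegionLocation(hri.getStartKey()).getServerName();
    }
  }
}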
@@ -1952,13 +1971,13 @@ public class TestHBaseFsck {
       assertEquals(ROWKEYS.length, countRows());
 
       // Mess it up by closing a region
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"),
-        Bytes.toBytes("B"), true, false, false, false, HRegionInfo.DEFAULT_REPLICA_ID);
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true,
+          false, false, false, HRegionInfo.DEFAULT_REPLICA_ID);
 
       // verify there is no other errors
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] {
-        ERROR_CODE.NOT_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(hbck,
+          new ERROR_CODE[] { ERROR_CODE.NOT_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN });
 
       // verify that noHdfsChecking report the same errors
       HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService);
@@ -1967,8 +1986,8 @@ public class TestHBaseFsck {
       fsck.setTimeLag(0);
       fsck.setCheckHdfs(false);
       fsck.onlineHbck();
-      assertErrors(fsck, new ERROR_CODE[] {
-        ERROR_CODE.NOT_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(fsck,
+          new ERROR_CODE[] { ERROR_CODE.NOT_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN });
       fsck.close();
 
       // verify that fixAssignments works fine with noHdfsChecking
@@ -2062,8 +2081,8 @@ public class TestHBaseFsck {
 
       // Mess it up by creating an overlap in the metadata
       admin.disableTable(table);
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"),
-        Bytes.toBytes("B"), true, true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true,
+          true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
       admin.enableTable(table);
 
       HRegionInfo hriOverlap =
@@ -2086,8 +2105,7 @@ public class TestHBaseFsck {
       fsck.setTimeLag(0);
       fsck.setCheckHdfs(false);
       fsck.onlineHbck();
-      assertErrors(fsck, new ERROR_CODE[] {
-        ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(fsck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN });
       fsck.close();
 
       // verify that fixHdfsHoles doesn't work with noHdfsChecking
@@ -2101,7 +2119,7 @@ public class TestHBaseFsck {
       fsck.setFixHdfsOrphans(true);
       fsck.onlineHbck();
       assertFalse(fsck.shouldRerun());
-      assertErrors(fsck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      assertErrors(fsck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN });
       fsck.close();
     } finally {
       if (admin.isTableDisabled(table)) {
@@ -2229,7 +2247,8 @@ public class TestHBaseFsck {
     final FileSystem fs = FileSystem.get(conf);
     HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) {
       @Override
-      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
+      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
+          throws IOException {
         return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
           AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
           @Override
@@ -2259,7 +2278,8 @@ public class TestHBaseFsck {
     final FileSystem fs = FileSystem.get(conf);
     HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) {
       @Override
-      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
+      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
+          throws IOException {
         return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
           AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
           @Override
@@ -2529,16 +2549,20 @@ public class TestHBaseFsck {
     Threads.sleep(300); // wait some more to ensure writeLock.acquire() is called
 
     hbck = doFsck(conf, false);
-    assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.EXPIRED_TABLE_LOCK}); // still one expired, one not-expired
+    assertErrors(hbck, new ERROR_CODE[] {
+        ERROR_CODE.EXPIRED_TABLE_LOCK}); // still one expired, one not-expired
 
     edge.incrementTime(conf.getLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT,
         TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS)); // let table lock expire
 
     hbck = doFsck(conf, false);
-    assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.EXPIRED_TABLE_LOCK, ERROR_CODE.EXPIRED_TABLE_LOCK}); // both are expired
+    assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.EXPIRED_TABLE_LOCK,
+        ERROR_CODE.EXPIRED_TABLE_LOCK}); // both are expired
+
+    // reaping from ZKInterProcessWriteLock uses znode cTime,
+    // which is not injectable through EnvironmentEdge
+    conf.setLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, 1);
 
-    conf.setLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, 1); // reaping from ZKInterProcessWriteLock uses znode cTime,
-                                                                 // which is not injectable through EnvironmentEdge
     Threads.sleep(10);
     hbck = doFsck(conf, true); // now fix both cases
 
@@ -2616,7 +2640,7 @@ public class TestHBaseFsck {
       HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
       desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
       createTable(TEST_UTIL, desc, null);
-      tbl = (HTable) connection.getTable(table, tableExecutorService);
+      tbl = connection.getTable(table, tableExecutorService);
 
       // Mess it up by leaving a hole in the assignment, meta, and hdfs data
       deleteRegion(conf, tbl.getTableDescriptor(), HConstants.EMPTY_START_ROW,
@@ -2648,35 +2672,36 @@ public class TestHBaseFsck {
       setupTable(table);
       assertEquals(ROWKEYS.length, countRows());
 
-      // make sure data in regions, if in wal only there is no data loss
-      admin.flush(table);
-      HRegionInfo region1 = tbl.getRegionLocation(Bytes.toBytes("A")).getRegionInfo();
-      HRegionInfo region2 = tbl.getRegionLocation(Bytes.toBytes("B")).getRegionInfo();
+      try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
+        // make sure data in regions, if in wal only there is no data loss
+        admin.flush(table);
+        HRegionInfo region1 = rl.getRegionLocation(Bytes.toBytes("A")).getRegionInfo();
+        HRegionInfo region2 = rl.getRegionLocation(Bytes.toBytes("B")).getRegionInfo();
 
-      int regionCountBeforeMerge = tbl.getRegionLocations().size();
+        int regionCountBeforeMerge = rl.getAllRegionLocations().size();
 
-      assertNotEquals(region1, region2);
+        assertNotEquals(region1, region2);
 
-      // do a region merge
-      admin.mergeRegions(region1.getEncodedNameAsBytes(),
-          region2.getEncodedNameAsBytes(), false);
+        // do a region merge
+        admin.mergeRegions(region1.getEncodedNameAsBytes(), region2.getEncodedNameAsBytes(), false);
 
-      // wait until region merged
-      long timeout = System.currentTimeMillis() + 30 * 1000;
-      while (true) {
-        if (tbl.getRegionLocations().size() < regionCountBeforeMerge) {
-          break;
-        } else if (System.currentTimeMillis() > timeout) {
-          fail("Time out waiting on region " + region1.getEncodedName()
-              + " and " + region2.getEncodedName() + " be merged");
+        // wait until region merged
+        long timeout = System.currentTimeMillis() + 30 * 1000;
+        while (true) {
+          if (rl.getAllRegionLocations().size() < regionCountBeforeMerge) {
+            break;
+          } else if (System.currentTimeMillis() > timeout) {
+            fail("Time out waiting on region " + region1.getEncodedName() + " and " + region2
+                .getEncodedName() + " be merged");
+          }
+          Thread.sleep(10);
         }
-        Thread.sleep(10);
-      }
 
-      assertEquals(ROWKEYS.length, countRows());
+        assertEquals(ROWKEYS.length, countRows());
 
-      HBaseFsck hbck = doFsck(conf, false);
-      assertNoErrors(hbck); // no errors
+        HBaseFsck hbck = doFsck(conf, false);
+        assertNoErrors(hbck); // no errors
+      }
 
     } finally {
       TEST_UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(true);

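[Editorial sketch] The merge test's wait loop is another recurring idiom after this migration: poll RegionLocator#getAllRegionLocations() until the region count drops, with a deadline. Extracted as a hedged, compilable helper; the class and method names are invented for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MergeWaitExample {
  // Poll RegionLocator until the region count drops below a threshold,
  // mirroring the merge-wait loop in the hunk above. Timeout in ms.
  static boolean waitForRegionCountBelow(Connection connection, TableName table,
      int threshold, long timeoutMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    try (RegionLocator rl = connection.getRegionLocator(table)) {
      while (System.currentTimeMillis() < deadline) {
        if (rl.getAllRegionLocations().size() < threshold) {
          return true;
        }
        Thread.sleep(10);
      }
    }
    return false;
  }
}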
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestProcessBasedCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestProcessBasedCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestProcessBasedCluster.java
index 513d538..085ddf5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestProcessBasedCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestProcessBasedCluster.java
@@ -58,7 +58,7 @@ public class TestProcessBasedCluster {
     cluster.startMiniDFS();
     cluster.startHBase();
     try {
-      TEST_UTIL.createRandomTable(HTestConst.DEFAULT_TABLE_STR,
+      TEST_UTIL.createRandomTable(HTestConst.DEFAULT_TABLE,
           HTestConst.DEFAULT_CF_STR_SET,
           HColumnDescriptor.DEFAULT_VERSIONS, COLS_PER_ROW, FLUSHES, NUM_REGIONS,
           ROWS_PER_FLUSH);