Posted to commits@hbase.apache.org by st...@apache.org on 2015/06/25 23:42:51 UTC

[3/6] hbase git commit: HBASE-13893 Replace HTable with Table in client tests (Jurriaan Mous)

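For readers skimming the hunks below: the change HBASE-13893 applies throughout is the move from the concrete HTable class to the Table interface, with handles obtained from a shared Connection and scoped by try-with-resources. A minimal sketch of the new-style client usage, assuming a reachable cluster and using "example"/"f" as placeholder table and family names (not values from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableInterfaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Connection is heavyweight and long-lived; Table is a cheap,
        // short-lived handle acquired from it on demand.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("example"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(put);
        } // table, then connection, closed here
      }
    }

That split between a long-lived Connection and disposable handles is why the tests stop caching HTable fields and instead re-acquire what they need from util.getConnection().
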
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
index b3c29b7..9acc5ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
@@ -389,55 +389,57 @@ public class TestHFileOutputFormat  {
       util.startMiniCluster();
       Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
       HBaseAdmin admin = util.getHBaseAdmin();
-      HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
+      Table table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
       assertEquals("Should start with empty table",
           0, util.countRows(table));
       int numRegions = -1;
-      try(RegionLocator r = table.getRegionLocator()) {
+      try(RegionLocator r = util.getConnection().getRegionLocator(TABLE_NAME)) {
         numRegions = r.getStartKeys().length;
-      }
-      assertEquals("Should make 5 regions", numRegions, 5);
 
-      // Generate the bulk load files
-      util.startMiniMapReduceCluster();
-      runIncrementalPELoad(conf, table, testDir);
-      // This doesn't write into the table, just makes files
-      assertEquals("HFOF should not touch actual table",
-          0, util.countRows(table));
+        assertEquals("Should make 5 regions", numRegions, 5);
 
+        // Generate the bulk load files
+        util.startMiniMapReduceCluster();
+        runIncrementalPELoad(conf, table, r, testDir);
+        // This doesn't write into the table, just makes files
+        assertEquals("HFOF should not touch actual table",
+            0, util.countRows(table));
 
-      // Make sure that a directory was created for every CF
-      int dir = 0;
-      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
-        for (byte[] family : FAMILIES) {
-          if (Bytes.toString(family).equals(f.getPath().getName())) {
-            ++dir;
+
+
+        // Make sure that a directory was created for every CF
+        int dir = 0;
+        for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
+          for (byte[] family : FAMILIES) {
+            if (Bytes.toString(family).equals(f.getPath().getName())) {
+              ++dir;
+            }
           }
         }
-      }
-      assertEquals("Column family not found in FS.", FAMILIES.length, dir);
-
-      // handle the split case
-      if (shouldChangeRegions) {
-        LOG.info("Changing regions in table");
-        admin.disableTable(table.getName());
-        while(util.getMiniHBaseCluster().getMaster().getAssignmentManager().
-            getRegionStates().isRegionsInTransition()) {
-          Threads.sleep(200);
-          LOG.info("Waiting on table to finish disabling");
-        }
-        util.deleteTable(table.getName());
-        byte[][] newSplitKeys = generateRandomSplitKeys(14);
-        table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);
-        while (table.getRegionLocations().size() != 15 ||
-            !admin.isTableAvailable(table.getName())) {
-          Thread.sleep(200);
-          LOG.info("Waiting for new region assignment to happen");
+        assertEquals("Column family not found in FS.", FAMILIES.length, dir);
+
+        // handle the split case
+        if (shouldChangeRegions) {
+          LOG.info("Changing regions in table");
+          admin.disableTable(table.getName());
+          while(util.getMiniHBaseCluster().getMaster().getAssignmentManager().
+              getRegionStates().isRegionsInTransition()) {
+            Threads.sleep(200);
+            LOG.info("Waiting on table to finish disabling");
+          }
+          util.deleteTable(table.getName());
+          byte[][] newSplitKeys = generateRandomSplitKeys(14);
+          table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);
+          while (r.getAllRegionLocations().size() != 15 ||
+              !admin.isTableAvailable(table.getName())) {
+            Thread.sleep(200);
+            LOG.info("Waiting for new region assignment to happen");
+          }
         }
-      }
 
-      // Perform the actual load
-      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
+        // Perform the actual load
+        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);
+      }
 
       // Ensure data shows up
       int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
@@ -473,7 +475,7 @@ public class TestHFileOutputFormat  {
   }
 
   private void runIncrementalPELoad(
-      Configuration conf, HTable table, Path outDir)
+      Configuration conf, Table table, RegionLocator regionLocator, Path outDir)
   throws Exception {
     Job job = new Job(conf, "testLocalMRIncrementalLoad");
     job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
@@ -482,12 +484,12 @@ public class TestHFileOutputFormat  {
         KeyValueSerialization.class.getName());
     setupRandomGeneratorMapper(job);
     HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(),
-        table.getRegionLocator());
+        regionLocator);
     FileOutputFormat.setOutputPath(job, outDir);
 
     Assert.assertFalse( util.getTestFileSystem().exists(outDir)) ;
 
-    assertEquals(table.getRegionLocator().getAllRegionLocations().size(), job.getNumReduceTasks());
+    assertEquals(regionLocator.getAllRegionLocations().size(), job.getNumReduceTasks());
 
     assertTrue(job.waitForCompletion(true));
   }
@@ -910,7 +912,7 @@ public class TestHFileOutputFormat  {
       util.startMiniCluster();
       final FileSystem fs = util.getDFSCluster().getFileSystem();
       HBaseAdmin admin = util.getHBaseAdmin();
-      HTable table = util.createTable(TABLE_NAME, FAMILIES);
+      Table table = util.createTable(TABLE_NAME, FAMILIES);
       assertEquals("Should start with empty table", 0, util.countRows(table));
 
       // deep inspection: get the StoreFile dir
@@ -925,11 +927,13 @@ public class TestHFileOutputFormat  {
           true);
       util.startMiniMapReduceCluster();
 
-      for (int i = 0; i < 2; i++) {
-        Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
-        runIncrementalPELoad(conf, table, testDir);
-        // Perform the actual load
-        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
+      try (RegionLocator regionLocator = util.getConnection().getRegionLocator(TABLE_NAME)) {
+        for (int i = 0; i < 2; i++) {
+          Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
+          runIncrementalPELoad(conf, table, regionLocator, testDir);
+          // Perform the actual load
+          new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator);
+        }
       }
 
       // Ensure data shows up
@@ -978,7 +982,7 @@ public class TestHFileOutputFormat  {
       Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
       final FileSystem fs = util.getDFSCluster().getFileSystem();
       Admin admin = util.getHBaseAdmin();
-      HTable table = util.createTable(TABLE_NAME, FAMILIES);
+      Table table = util.createTable(TABLE_NAME, FAMILIES);
       assertEquals("Should start with empty table", 0, util.countRows(table));
 
       // deep inspection: get the StoreFile dir
@@ -1004,10 +1008,13 @@ public class TestHFileOutputFormat  {
       conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
           true);
       util.startMiniMapReduceCluster();
-      runIncrementalPELoad(conf, table, testDir);
 
-      // Perform the actual load
-      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
+      try (RegionLocator locator = util.getConnection().getRegionLocator(TABLE_NAME)) {
+        runIncrementalPELoad(conf, table, locator, testDir);
+
+        // Perform the actual load
+        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
+      }
 
       // Ensure data shows up
       int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
@@ -1066,17 +1073,17 @@ public class TestHFileOutputFormat  {
     if ("newtable".equals(args[0])) {
       TableName tname = TableName.valueOf(args[1]);
       byte[][] splitKeys = generateRandomSplitKeys(4);
-      HTable table = util.createTable(tname, FAMILIES, splitKeys);
+      Table table = util.createTable(tname, FAMILIES, splitKeys);
     } else if ("incremental".equals(args[0])) {
       TableName tname = TableName.valueOf(args[1]);
-      HTable table = (HTable) util.getConnection().getTable(tname);
+      Table table = util.getConnection().getTable(tname);
       Path outDir = new Path("incremental-out");
-      runIncrementalPELoad(conf, table, outDir);
+      try (RegionLocator locator = util.getConnection().getRegionLocator(tname)) {
+        runIncrementalPELoad(conf, table, locator, outDir);
+      }
     } else {
       throw new RuntimeException(
           "usage: TestHFileOutputFormat newtable | incremental");
     }
   }
-
 }
-

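The recurring edit in TestHFileOutputFormat above swaps the HTable-only table.getRegionLocator() for a locator fetched from the Connection and held open across the whole block that needs it. A standalone sketch of that shape, again with a placeholder table name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocatorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("example"); // placeholder
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(name)) {
          // One start key per region, so the length is the region count --
          // the same check the test performs after createTable().
          int numRegions = locator.getStartKeys().length;
          System.out.println(name + ": " + numRegions + " region(s), "
              + locator.getAllRegionLocations().size() + " location(s)");
        }
      }
    }
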
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 3b066f2..56e475b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HRegionLocator;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -388,25 +389,26 @@ public class TestHFileOutputFormat2  {
     Configuration conf = util.getConfiguration();
     byte[][] splitKeys = generateRandomSplitKeys(4);
     util.startMiniCluster();
-    try {
-      HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
-      Admin admin = table.getConnection().getAdmin();
+
+    Table table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
+    try (RegionLocator r = util.getConnection().getRegionLocator(TABLE_NAME)) {
+
+      Admin admin = util.getConnection().getAdmin();
       Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
       assertEquals("Should start with empty table",
           0, util.countRows(table));
-      int numRegions = -1;
-      try (RegionLocator r = table.getRegionLocator()) {
+      int numRegions;
+
         numRegions = r.getStartKeys().length;
-      }
-      assertEquals("Should make 5 regions", numRegions, 5);
 
-      // Generate the bulk load files
-      util.startMiniMapReduceCluster();
-      runIncrementalPELoad(conf, table.getTableDescriptor(), table.getRegionLocator(), testDir);
-      // This doesn't write into the table, just makes files
-      assertEquals("HFOF should not touch actual table",
-          0, util.countRows(table));
+        assertEquals("Should make 5 regions", numRegions, 5);
 
+        // Generate the bulk load files
+        util.startMiniMapReduceCluster();
+        runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir);
+        // This doesn't write into the table, just makes files
+        assertEquals("HFOF should not touch actual table",
+            0, util.countRows(table));
 
       // Make sure that a directory was created for every CF
       int dir = 0;
@@ -432,7 +434,8 @@ public class TestHFileOutputFormat2  {
         byte[][] newSplitKeys = generateRandomSplitKeys(14);
         table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);
 
-        while (table.getRegionLocator().getAllRegionLocations().size() != 15 ||
+        while (util.getConnection().getRegionLocator(TABLE_NAME)
+            .getAllRegionLocations().size() != 15 ||
             !admin.isTableAvailable(table.getName())) {
           Thread.sleep(200);
           LOG.info("Waiting for new region assignment to happen");
@@ -440,7 +443,7 @@ public class TestHFileOutputFormat2  {
       }
 
       // Perform the actual load
-      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
+      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);
 
       // Ensure data shows up
       int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
@@ -911,9 +914,10 @@ public class TestHFileOutputFormat2  {
 
     util.startMiniCluster();
     try (Connection conn = ConnectionFactory.createConnection();
-        Admin admin = conn.getAdmin()) {
+        Admin admin = conn.getAdmin();
+        Table table = util.createTable(TABLE_NAME, FAMILIES);
+        RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
       final FileSystem fs = util.getDFSCluster().getFileSystem();
-      HTable table = util.createTable(TABLE_NAME, FAMILIES);
       assertEquals("Should start with empty table", 0, util.countRows(table));
 
       // deep inspection: get the StoreFile dir
@@ -933,7 +937,7 @@ public class TestHFileOutputFormat2  {
         runIncrementalPELoad(conf, table.getTableDescriptor(), conn.getRegionLocator(TABLE_NAME),
             testDir);
         // Perform the actual load
-        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
+        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
       }
 
       // Ensure data shows up
@@ -1077,7 +1081,7 @@ public class TestHFileOutputFormat2  {
     if ("newtable".equals(args[0])) {
       TableName tname = TableName.valueOf(args[1]);
       byte[][] splitKeys = generateRandomSplitKeys(4);
-      try (HTable table = util.createTable(tname, FAMILIES, splitKeys)) {
+      try (Table table = util.createTable(tname, FAMILIES, splitKeys)) {
       }
     } else if ("incremental".equals(args[0])) {
       TableName tname = TableName.valueOf(args[1]);

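TestHFileOutputFormat2 goes one step further and declares admin, table and locator in a single try-with-resources header; resources are closed in reverse order of declaration, so every handle is released before the Connection it came from. A self-contained sketch of that shape, with "example" standing in for TABLE_NAME:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;

    public class SharedConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("example"); // placeholder
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(name);
             RegionLocator locator = conn.getRegionLocator(name)) {
          // Closed in reverse order: locator, table, admin, then conn.
          System.out.println("exists=" + admin.tableExists(table.getName())
              + ", locations=" + locator.getAllRegionLocations().size());
        }
      }
    }
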
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
index 762f530..1138d05 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -68,7 +69,7 @@ public class TestHashTable {
   
   @Test
   public void testHashTable() throws Exception {
-    final String tableName = "testHashTable";
+    final TableName tableName = TableName.valueOf("testHashTable");
     final byte[] family = Bytes.toBytes("family");
     final byte[] column1 = Bytes.toBytes("c1");
     final byte[] column2 = Bytes.toBytes("c2");
@@ -85,7 +86,7 @@ public class TestHashTable {
     
     long timestamp = 1430764183454L;
     // put rows into the first table
-    HTable t1 = TEST_UTIL.createTable(TableName.valueOf(tableName), family, splitRows);
+    Table t1 = TEST_UTIL.createTable(tableName, family, splitRows);
     for (int i = 0; i < numRows; i++) {
       Put p = new Put(Bytes.toBytes(i), timestamp);
       p.addColumn(family, column1, column1);
@@ -97,21 +98,21 @@ public class TestHashTable {
     
     HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration());
     
-    Path testDir = TEST_UTIL.getDataTestDirOnTestFS(tableName);
+    Path testDir = TEST_UTIL.getDataTestDirOnTestFS(tableName.getNameAsString());
     
     long batchSize = 300;
     int code = hashTable.run(new String[] { 
         "--batchsize=" + batchSize,
         "--numhashfiles=" + numHashFiles,
         "--scanbatch=2",
-        tableName,
+        tableName.getNameAsString(),
         testDir.toString()});
     assertEquals("test job failed", 0, code);
     
     FileSystem fs = TEST_UTIL.getTestFileSystem();
     
     HashTable.TableHash tableHash = HashTable.TableHash.read(fs.getConf(), testDir);
-    assertEquals(tableName, tableHash.tableName);
+    assertEquals(tableName.getNameAsString(), tableHash.tableName);
     assertEquals(batchSize, tableHash.batchSize);
     assertEquals(numHashFiles, tableHash.numHashFiles);
     assertEquals(numHashFiles - 1, tableHash.partitions.size());

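The other change running through TestHashTable and the ImportTsv tests below is typing table names as TableName rather than String: TableName.valueOf() validates the name once at construction, and getNameAsString() recovers the raw String at the few boundaries that still require one (command-line argv, test directory names). A tiny sketch using the name from this test:

    import org.apache.hadoop.hbase.TableName;

    public class TableNameSketch {
      public static void main(String[] args) {
        // Validated (charset, length, namespace) once, up front.
        TableName tableName = TableName.valueOf("testHashTable");
        // Drop back to String only where an API still demands one,
        // e.g. the HashTable CLI argument above.
        String arg = tableName.getNameAsString();
        System.out.println(tableName.getNamespaceAsString() + ":" + arg);
      }
    }
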
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
index 7ec026f..7360a63 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
@@ -110,32 +110,32 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
 
   @Test
   public void testMROnTable() throws Exception {
-    String tableName = "test-" + UUID.randomUUID();
+    TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
 
     // Prepare the arguments required for the test.
     String[] args = new String[] {
         "-D" + ImportTsv.MAPPER_CONF_KEY
             + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr",
         "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY",
-        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName };
+        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
     String data = "KEY\u001bVALUE1\u001bVALUE2\u001btest=>myvalue\n";
-    util.createTable(TableName.valueOf(tableName), FAMILY);
+    util.createTable(tableName, FAMILY);
     doMROnTableTest(util, FAMILY, data, args, 1, true);
     util.deleteTable(tableName);
   }
 
   @Test
   public void testMROnTableWithInvalidOperationAttr() throws Exception {
-    String tableName = "test-" + UUID.randomUUID();
+    TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
 
     // Prepare the arguments required for the test.
     String[] args = new String[] {
         "-D" + ImportTsv.MAPPER_CONF_KEY
             + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr",
         "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY",
-        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName };
+        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
     String data = "KEY\u001bVALUE1\u001bVALUE2\u001btest1=>myvalue\n";
-    util.createTable(TableName.valueOf(tableName), FAMILY);
+    util.createTable(tableName, FAMILY);
     doMROnTableTest(util, FAMILY, data, args, 1, false);
     util.deleteTable(tableName);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java
index 53bdf70..445620f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java
@@ -101,16 +101,16 @@ public class TestImportTSVWithTTLs implements Configurable {
 
   @Test
   public void testMROnTable() throws Exception {
-    String tableName = "test-" + UUID.randomUUID();
+    TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
 
     // Prepare the arguments required for the test.
     String[] args = new String[] {
         "-D" + ImportTsv.MAPPER_CONF_KEY
             + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper",
         "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_TTL",
-        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName };
+        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
     String data = "KEY\u001bVALUE1\u001bVALUE2\u001b1000000\n";
-    util.createTable(TableName.valueOf(tableName), FAMILY);
+    util.createTable(tableName, FAMILY);
     doMROnTableTest(util, FAMILY, data, args, 1);
     util.deleteTable(tableName);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
index 8d974ab..6f0696e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
@@ -154,16 +154,16 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
 
   @Test
   public void testMROnTable() throws Exception {
-    String tableName = "test-" + UUID.randomUUID();
+    TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
 
     // Prepare the arguments required for the test.
     String[] args = new String[] {
         "-D" + ImportTsv.MAPPER_CONF_KEY
             + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper",
         "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
-        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName };
+        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
     String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n";
-    util.createTable(TableName.valueOf(tableName), FAMILY);
+    util.createTable(tableName, FAMILY);
     doMROnTableTest(util, FAMILY, data, args, 1);
     util.deleteTable(tableName);
   }
@@ -222,25 +222,25 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
 
   @Test
   public void testMROnTableWithBulkload() throws Exception {
-    String tableName = "test-" + UUID.randomUUID();
-    Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName), "hfiles");
+    TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
+    Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles");
     // Prepare the arguments required for the test.
     String[] args = new String[] {
         "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(),
         "-D" + ImportTsv.COLUMNS_CONF_KEY
             + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
-        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName };
+        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
     String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n";
-    util.createTable(TableName.valueOf(tableName), FAMILY);
+    util.createTable(tableName, FAMILY);
     doMROnTableTest(util, FAMILY, data, args, 1);
     util.deleteTable(tableName);
   }
 
   @Test
   public void testBulkOutputWithTsvImporterTextMapper() throws Exception {
-    String table = "test-" + UUID.randomUUID();
+    TableName table = TableName.valueOf("test-" + UUID.randomUUID());
     String FAMILY = "FAM";
-    Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table),"hfiles");
+    Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()),"hfiles");
     // Prepare the arguments required for the test.
     String[] args =
         new String[] {
@@ -249,7 +249,8 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
             "-D" + ImportTsv.COLUMNS_CONF_KEY
                 + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
             "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
-            "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), table
+            "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(),
+            table.getNameAsString()
             };
     String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n";
     doMROnTableTest(util, FAMILY, data, args, 4);
@@ -258,17 +259,17 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
 
   @Test
   public void testMRWithOutputFormat() throws Exception {
-    String tableName = "test-" + UUID.randomUUID();
-    Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName), "hfiles");
+    TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
+    Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles");
     // Prepare the arguments required for the test.
     String[] args = new String[] {
         "-D" + ImportTsv.MAPPER_CONF_KEY
             + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper",
         "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(),
         "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
-        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName };
+        "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
     String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n";
-    util.createTable(TableName.valueOf(tableName), FAMILY);
+    util.createTable(tableName, FAMILY);
     doMROnTableTest(util, FAMILY, data, args, 1);
     util.deleteTable(tableName);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
index 9f36587..ca19af4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
@@ -125,7 +125,7 @@ public class TestImportTsv implements Configurable {
     doMROnTableTest(null, 1);
     util.deleteTable(table);
   }
-  
+
   @Test
   public void testMROnTableWithTimestamp() throws Exception {
     util.createTable(TableName.valueOf(table), FAMILY);
@@ -147,7 +147,7 @@ public class TestImportTsv implements Configurable {
     doMROnTableTest(null, 3);
     util.deleteTable(table);
   }
-  
+
   @Test
   public void testBulkOutputWithoutAnExistingTable() throws Exception {
     // Prepare the arguments required for the test.
@@ -169,7 +169,7 @@ public class TestImportTsv implements Configurable {
     doMROnTableTest(null, 3);
     util.deleteTable(table);
   }
-  
+
   @Test
   public void testBulkOutputWithAnExistingTableNoStrictTrue() throws Exception {
     util.createTable(TableName.valueOf(table), FAMILY);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index 2ca0ca5..80cbb70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -177,8 +178,10 @@ public class TestLoadIncrementalHFilesSplitRecovery {
     // create HFiles for different column families
     LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
     Path bulk1 = buildBulkFiles(table, value);
-    try (Table t = connection.getTable(table)) {
-      lih.doBulkLoad(bulk1, (HTable)t);
+    try (Table t = connection.getTable(table);
+        RegionLocator locator = connection.getRegionLocator(table);
+        Admin admin = connection.getAdmin()) {
+        lih.doBulkLoad(bulk1, admin, t, locator);
     }
   }
 
@@ -223,7 +226,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   @BeforeClass
   public static void setupCluster() throws Exception {
     util = new HBaseTestingUtility();
-    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,"");
+    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
     util.startMiniCluster(1);
   }
 
@@ -298,8 +301,10 @@ public class TestLoadIncrementalHFilesSplitRecovery {
       try {
         // create HFiles for different column families
         Path dir = buildBulkFiles(table, 1);
-        try (Table t = connection.getTable(table)) {
-          lih.doBulkLoad(dir, (HTable)t);
+        try (Table t = connection.getTable(table);
+            RegionLocator locator = connection.getRegionLocator(table);
+            Admin admin = connection.getAdmin()) {
+          lih.doBulkLoad(dir, admin, t, locator);
         }
       } finally {
         util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
@@ -364,9 +369,11 @@ public class TestLoadIncrementalHFilesSplitRecovery {
       };
 
       // create HFiles for different column families
-      try (Table t = connection.getTable(table)) {
+      try (Table t = connection.getTable(table);
+          RegionLocator locator = connection.getRegionLocator(table);
+          Admin admin = connection.getAdmin()) {
         Path bulk = buildBulkFiles(table, 2);
-        lih2.doBulkLoad(bulk, (HTable)t);
+        lih2.doBulkLoad(bulk, admin, t, locator);
       }
 
       // check that data was loaded
@@ -408,8 +415,10 @@ public class TestLoadIncrementalHFilesSplitRecovery {
 
       // create HFiles for different column families
       Path bulk = buildBulkFiles(table, 2);
-      try (Table t = connection.getTable(table)) {
-        lih.doBulkLoad(bulk, (HTable)t);
+      try (Table t = connection.getTable(table);
+          RegionLocator locator = connection.getRegionLocator(table);
+          Admin admin = connection.getAdmin()) {
+        lih.doBulkLoad(bulk, admin, t, locator);
       }
       assertExpectedTable(connection, table, ROWCOUNT, 2);
       assertEquals(20, countedLqis.get());
@@ -446,8 +455,10 @@ public class TestLoadIncrementalHFilesSplitRecovery {
 
       // create HFiles for different column families
       Path dir = buildBulkFiles(table,1);
-      try (Table t = connection.getTable(table)) {
-        lih.doBulkLoad(dir, (HTable)t);
+      try (Table t = connection.getTable(table);
+          RegionLocator locator = connection.getRegionLocator(table);
+          Admin admin = connection.getAdmin()) {
+        lih.doBulkLoad(dir, admin, t, locator);
       }
     }
 
@@ -472,7 +483,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
 
       protected List<LoadQueueItem> groupOrSplit(
           Multimap<ByteBuffer, LoadQueueItem> regionGroups,
-          final LoadQueueItem item, final HTable htable,
+          final LoadQueueItem item, final Table htable,
           final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
         List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
         if (lqis != null) {
@@ -483,8 +494,10 @@ public class TestLoadIncrementalHFilesSplitRecovery {
     };
 
     // do bulkload when there is no region hole in hbase:meta.
-    try {
-      loader.doBulkLoad(dir, (HTable)table);
+    try (Table t = connection.getTable(tableName);
+        RegionLocator locator = connection.getRegionLocator(tableName);
+        Admin admin = connection.getAdmin()) {
+      loader.doBulkLoad(dir, admin, t, locator);
     } catch (Exception e) {
       LOG.error("exeception=", e);
     }
@@ -502,10 +515,12 @@ public class TestLoadIncrementalHFilesSplitRecovery {
       }
     }
 
-    try {
-      loader.doBulkLoad(dir, (HTable)table);
+    try (Table t = connection.getTable(tableName);
+        RegionLocator locator = connection.getRegionLocator(tableName);
+        Admin admin = connection.getAdmin()) {
+      loader.doBulkLoad(dir, admin, t, locator);
     } catch (Exception e) {
-      LOG.error("exeception=", e);
+      LOG.error("exception=", e);
       assertTrue("IOException expected", e instanceof IOException);
     }
 

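TestLoadIncrementalHFilesSplitRecovery shows the other half of the API shift: doBulkLoad() no longer takes a concrete HTable but the explicit collaborators (HFile directory, Admin, Table, RegionLocator), so any Table implementation can be loaded. A sketch of a caller under that signature, assuming a prepared HFile directory at the placeholder path /tmp/hfiles:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("example");   // placeholder
        Path hfileDir = new Path("/tmp/hfiles");         // placeholder
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             RegionLocator locator = conn.getRegionLocator(name);
             Admin admin = conn.getAdmin()) {
          // The caller supplies the dependencies; the loader no longer
          // digs them out of an HTable.
          new LoadIncrementalHFiles(conf).doBulkLoad(hfileDir, admin, table, locator);
        }
      }
    }
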
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
index 1bfd14a..98b7e42 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.mapreduce;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
@@ -40,9 +41,9 @@ public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase {
   @BeforeClass
   public static void setupLogging() {
     TEST_UTIL.enableDebug(MultiTableInputFormat.class);
-  }
+      }
 
-  @Override
+    @Override
   protected void initJob(List<Scan> scans, Job job) throws IOException {
     TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class,
         ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
index 6180632..8dce0ed 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
@@ -70,7 +70,7 @@ public class TestMultithreadedTableMapper {
   @BeforeClass
   public static void beforeClass() throws Exception {
     UTIL.startMiniCluster();
-    HTable table =
+    Table table =
         UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY,
             OUTPUT_FAMILY });
     UTIL.loadTable(table, INPUT_FAMILY, false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
index 80af874..2024429 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
@@ -155,7 +155,7 @@ public class TestRowCounter {
     long ts;
 
     // clean up content of TABLE_NAME
-    HTable table = TEST_UTIL.deleteTableData(TableName.valueOf(TABLE_NAME));
+    Table table = TEST_UTIL.deleteTableData(TableName.valueOf(TABLE_NAME));
     ts = System.currentTimeMillis();
     put1.add(family, col1, ts, Bytes.toBytes("val1"));
     table.put(put1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
index a86270f..94348b5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
@@ -80,8 +80,8 @@ public class TestSyncTable {
   
   @Test
   public void testSyncTable() throws Exception {
-    String sourceTableName = "testSourceTable";
-    String targetTableName = "testTargetTable";
+    TableName sourceTableName = TableName.valueOf("testSourceTable");
+    TableName targetTableName = TableName.valueOf("testTargetTable");
     Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTable");
     
     writeTestData(sourceTableName, targetTableName);
@@ -101,10 +101,10 @@ public class TestSyncTable {
     TEST_UTIL.cleanupDataTestDirOnTestFS();
   }
 
-  private void assertEqualTables(int expectedRows, String sourceTableName, String targetTableName) 
-      throws Exception {
-    Table sourceTable = TEST_UTIL.getConnection().getTable(TableName.valueOf(sourceTableName));
-    Table targetTable = TEST_UTIL.getConnection().getTable(TableName.valueOf(targetTableName));
+  private void assertEqualTables(int expectedRows, TableName sourceTableName,
+      TableName targetTableName) throws Exception {
+    Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName);
+    Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName);
     
     ResultScanner sourceScanner = sourceTable.getScanner(new Scan());
     ResultScanner targetScanner = targetTable.getScanner(new Scan());
@@ -177,13 +177,13 @@ public class TestSyncTable {
     targetTable.close();
   }
 
-  private Counters syncTables(String sourceTableName, String targetTableName,
+  private Counters syncTables(TableName sourceTableName, TableName targetTableName,
       Path testDir) throws Exception {
     SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration());
     int code = syncTable.run(new String[] { 
         testDir.toString(),
-        sourceTableName,
-        targetTableName
+        sourceTableName.getNameAsString(),
+        targetTableName.getNameAsString()
         });
     assertEquals("sync table job failed", 0, code);
     
@@ -191,7 +191,7 @@ public class TestSyncTable {
     return syncTable.counters;
   }
 
-  private void hashSourceTable(String sourceTableName, Path testDir)
+  private void hashSourceTable(TableName sourceTableName, Path testDir)
       throws Exception, IOException {
     int numHashFiles = 3;
     long batchSize = 100;  // should be 2 batches per region
@@ -201,14 +201,14 @@ public class TestSyncTable {
         "--batchsize=" + batchSize,
         "--numhashfiles=" + numHashFiles,
         "--scanbatch=" + scanBatch,
-        sourceTableName,
+        sourceTableName.getNameAsString(),
         testDir.toString()});
     assertEquals("hash table job failed", 0, code);
     
     FileSystem fs = TEST_UTIL.getTestFileSystem();
     
     HashTable.TableHash tableHash = HashTable.TableHash.read(fs.getConf(), testDir);
-    assertEquals(sourceTableName, tableHash.tableName);
+    assertEquals(sourceTableName.getNameAsString(), tableHash.tableName);
     assertEquals(batchSize, tableHash.batchSize);
     assertEquals(numHashFiles, tableHash.numHashFiles);
     assertEquals(numHashFiles - 1, tableHash.partitions.size());
@@ -216,7 +216,7 @@ public class TestSyncTable {
     LOG.info("Hash table completed");
   }
 
-  private void writeTestData(String sourceTableName, String targetTableName)
+  private void writeTestData(TableName sourceTableName, TableName targetTableName)
       throws Exception {
     final byte[] family = Bytes.toBytes("family");
     final byte[] column1 = Bytes.toBytes("c1");
@@ -229,10 +229,10 @@ public class TestSyncTable {
     int sourceRegions = 10;
     int targetRegions = 6;
     
-    HTable sourceTable = TEST_UTIL.createTable(TableName.valueOf(sourceTableName),
+    Table sourceTable = TEST_UTIL.createTable(sourceTableName,
         family, generateSplits(numRows, sourceRegions));
 
-    HTable targetTable = TEST_UTIL.createTable(TableName.valueOf(targetTableName),
+    Table targetTable = TEST_UTIL.createTable(targetTableName,
         family, generateSplits(numRows, targetRegions));
 
     long timestamp = 1430764183454L;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java
index 5cca54b..33d1a66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.NullWritable;
@@ -63,12 +64,12 @@ public abstract class TestTableInputFormatScanBase {
   private static final Log LOG = LogFactory.getLog(TestTableInputFormatScanBase.class);
   static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
-  static final byte[] TABLE_NAME = Bytes.toBytes("scantest");
+  static final TableName TABLE_NAME = TableName.valueOf("scantest");
   static final byte[] INPUT_FAMILY = Bytes.toBytes("contents");
   static final String KEY_STARTROW = "startRow";
   static final String KEY_LASTROW = "stpRow";
 
-  private static HTable table = null;
+  private static Table table = null;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -82,7 +83,7 @@ public abstract class TestTableInputFormatScanBase {
     // start mini hbase cluster
     TEST_UTIL.startMiniCluster(3);
     // create and fill table
-    table = TEST_UTIL.createMultiRegionTable(TableName.valueOf(TABLE_NAME), INPUT_FAMILY);
+    table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, INPUT_FAMILY);
     TEST_UTIL.loadTable(table, INPUT_FAMILY, false);
     // start MR cluster
     TEST_UTIL.startMiniMapReduceCluster();
@@ -182,7 +183,7 @@ public abstract class TestTableInputFormatScanBase {
     String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase() : "Empty") +
       "To" + (stop != null ? stop.toUpperCase() : "Empty");
     Configuration c = new Configuration(TEST_UTIL.getConfiguration());
-    c.set(TableInputFormat.INPUT_TABLE, Bytes.toString(TABLE_NAME));
+    c.set(TableInputFormat.INPUT_TABLE, TABLE_NAME.getNameAsString());
     c.set(TableInputFormat.SCAN_COLUMN_FAMILY, Bytes.toString(INPUT_FAMILY));
     c.set(KEY_STARTROW, start != null ? start : "");
     c.set(KEY_LASTROW, last != null ? last : "");
@@ -233,7 +234,7 @@ public abstract class TestTableInputFormatScanBase {
     LOG.info("scan before: " + scan);
     Job job = new Job(c, jobName);
     TableMapReduceUtil.initTableMapperJob(
-      Bytes.toString(TABLE_NAME), scan, ScanMapper.class,
+      TABLE_NAME, scan, ScanMapper.class,
       ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
     job.setReducerClass(ScanReducer.class);
     job.setNumReduceTasks(1); // one to get final "first" and "last" key
@@ -264,11 +265,11 @@ public abstract class TestTableInputFormatScanBase {
     c.set(KEY_STARTROW, "");
     c.set(KEY_LASTROW, "");
     Job job = new Job(c, jobName);
-    TableMapReduceUtil.initTableMapperJob(Bytes.toString(TABLE_NAME), scan, ScanMapper.class,
+    TableMapReduceUtil.initTableMapperJob(TABLE_NAME.getNameAsString(), scan, ScanMapper.class,
             ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
     TableInputFormat tif = new TableInputFormat();
     tif.setConf(job.getConfiguration());
-    Assert.assertEquals(new String(TABLE_NAME), new String(table.getTableName()));
+    Assert.assertEquals(TABLE_NAME, table.getName());
     List<InputSplit> splits = tif.getSplits(job);
     Assert.assertEquals(expectedNumOfSplits, splits.size());
   }

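The final hunk above works because TableName implements equals() and hashCode(), so Table.getName() can be asserted against directly instead of round-tripping both sides through byte[] and String. In isolation:

    import org.apache.hadoop.hbase.TableName;

    public class TableNameEqualitySketch {
      public static void main(String[] args) {
        TableName expected = TableName.valueOf("scantest");
        TableName actual = TableName.valueOf("scantest"); // stands in for table.getName()
        System.out.println(expected.equals(actual));      // true
      }
    }
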
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
index 2972222..572d9ae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java
@@ -76,7 +76,7 @@ public abstract class TestTableMapReduceBase {
   @BeforeClass
   public static void beforeClass() throws Exception {
     UTIL.startMiniCluster();
-    HTable table =
+    Table table =
         UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY,
             OUTPUT_FAMILY });
     UTIL.loadTable(table, INPUT_FAMILY, false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index 323957e..6b68bfe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -176,9 +176,9 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testAssignRegion() throws Exception {
-    String table = "testAssignRegion";
+    TableName table = TableName.valueOf("testAssignRegion");
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -203,7 +203,7 @@ public class TestAssignmentManagerOnCluster {
       RegionState newState = regionStates.getRegionState(hri);
       assertTrue(newState.isOpened());
     } finally {
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
     }
   }
 
@@ -212,7 +212,7 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=120000)
   public void testAssignRegionOnRestartedServer() throws Exception {
-    String table = "testAssignRegionOnRestartedServer";
+    TableName table = TableName.valueOf("testAssignRegionOnRestartedServer");
     TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20);
     TEST_UTIL.getMiniHBaseCluster().stopMaster(0);
     //restart the master so that conf take into affect
@@ -221,7 +221,7 @@ public class TestAssignmentManagerOnCluster {
     ServerName deadServer = null;
     HMaster master = null;
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -260,7 +260,7 @@ public class TestAssignmentManagerOnCluster {
         master.serverManager.expireServer(deadServer);
       }
 
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
 
       // reset the value for other tests
       TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 3);
@@ -431,9 +431,9 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testAssignWhileClosing() throws Exception {
-    String table = "testAssignWhileClosing";
+    TableName table = TableName.valueOf("testAssignWhileClosing");
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -471,7 +471,7 @@ public class TestAssignmentManagerOnCluster {
       TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 200);
     } finally {
       MyRegionObserver.preCloseEnabled.set(false);
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
     }
   }
 
@@ -480,9 +480,9 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testCloseFailed() throws Exception {
-    String table = "testCloseFailed";
+    TableName table = TableName.valueOf("testCloseFailed");
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -517,7 +517,7 @@ public class TestAssignmentManagerOnCluster {
       TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
     } finally {
       MyRegionObserver.preCloseEnabled.set(false);
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
     }
   }
 
@@ -526,9 +526,9 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testOpenFailed() throws Exception {
-    String table = "testOpenFailed";
+    TableName table = TableName.valueOf("testOpenFailed");
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -558,7 +558,7 @@ public class TestAssignmentManagerOnCluster {
       TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
     } finally {
       MyLoadBalancer.controledRegion = null;
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
     }
   }
 
@@ -654,9 +654,9 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testCloseHang() throws Exception {
-    String table = "testCloseHang";
+    TableName table = TableName.valueOf("testCloseHang");
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -690,7 +690,7 @@ public class TestAssignmentManagerOnCluster {
       TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
     } finally {
       MyRegionObserver.postCloseEnabled.set(false);
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
     }
   }
 
@@ -699,9 +699,9 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testOpenCloseRacing() throws Exception {
-    String table = "testOpenCloseRacing";
+    TableName table = TableName.valueOf("testOpenCloseRacing");
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -761,7 +761,7 @@ public class TestAssignmentManagerOnCluster {
       TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 6000);
     } finally {
       MyRegionObserver.postOpenEnabled.set(false);
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
     }
   }
 
@@ -770,11 +770,11 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testAssignRacingWithSSH() throws Exception {
-    String table = "testAssignRacingWithSSH";
+    TableName table = TableName.valueOf("testAssignRacingWithSSH");
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     MyMaster master = null;
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -836,7 +836,7 @@ public class TestAssignmentManagerOnCluster {
       if (master != null) {
         master.enableSSH(true);
       }
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
       cluster.startRegionServer();
     }
   }
@@ -939,11 +939,11 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testAssignOfflinedRegionBySSH() throws Exception {
-    String table = "testAssignOfflinedRegionBySSH";
+    TableName table = TableName.valueOf("testAssignOfflinedRegionBySSH");
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     MyMaster master = null;
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -1008,7 +1008,7 @@ public class TestAssignmentManagerOnCluster {
       TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 200);
     } finally {
       MyRegionServer.abortedServer = null;
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
       cluster.startRegionServer();
     }
   }
@@ -1018,11 +1018,11 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test (timeout=60000)
   public void testAssignDisabledRegionBySSH() throws Exception {
-    String table = "testAssignDisabledRegionBySSH";
+    TableName table = TableName.valueOf("testAssignDisabledRegionBySSH");
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    MyMaster master = null;
+    MyMaster master;
     try {
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
 
@@ -1085,7 +1085,7 @@ public class TestAssignmentManagerOnCluster {
       assertTrue(regionStates.isRegionOffline(hri));
     } finally {
       MyRegionServer.abortedServer = null;
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
       cluster.startRegionServer();
     }
   }
@@ -1095,10 +1095,10 @@ public class TestAssignmentManagerOnCluster {
    */
   @Test(timeout = 60000)
   public void testReportRegionStateTransition() throws Exception {
-    String table = "testReportRegionStateTransition";
+    TableName table = TableName.valueOf("testReportRegionStateTransition");
     try {
       MyRegionServer.simulateRetry = true;
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
+      HTableDescriptor desc = new HTableDescriptor(table);
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc);
       Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
@@ -1114,13 +1114,13 @@ public class TestAssignmentManagerOnCluster {
      // Assert that the region is actually open on the server
       TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
       // Closing region should just work fine
-      admin.disableTable(TableName.valueOf(table));
+      admin.disableTable(table);
       assertTrue(regionStates.isRegionOffline(hri));
       List<HRegionInfo> regions = TEST_UTIL.getHBaseAdmin().getOnlineRegions(serverName);
       assertTrue(!regions.contains(hri));
     } finally {
       MyRegionServer.simulateRetry = false;
-      TEST_UTIL.deleteTable(Bytes.toBytes(table));
+      TEST_UTIL.deleteTable(table);
     }
   }
 

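A note on the idiom, since it repeats through every test above: table names move from String to TableName, which drops the TableName.valueOf(...) wrapping at descriptor creation and the Bytes.toBytes(...) conversion at delete time. A minimal sketch, with an illustrative table and family name:

    TableName table = TableName.valueOf("exampleTable");
    HTableDescriptor desc = new HTableDescriptor(table);       // takes TableName directly
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
    admin.createTable(desc);
    // ... exercise the assignment behavior under test ...
    TEST_UTIL.deleteTable(table);                              // no Bytes.toBytes needed
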
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index bc437fc..813eb49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -343,10 +343,11 @@ public class TestDistributedLogSplitting {
     master.balanceSwitch(false);
 
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
-    HTable ht = installTable(zkw, TABLE_NAME, FAMILY_NAME, NUM_REGIONS_TO_CREATE);
+    Table ht = installTable(zkw, TABLE_NAME, FAMILY_NAME, NUM_REGIONS_TO_CREATE);
     NonceGeneratorWithDups ng = new NonceGeneratorWithDups();
     NonceGenerator oldNg =
-        ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection)ht.getConnection(), ng);
+        ConnectionUtils.injectNonceGeneratorForTesting(
+            (ClusterConnection)TEST_UTIL.getConnection(), ng);
 
     try {
       List<Increment> reqs = new ArrayList<Increment>();
@@ -380,7 +381,8 @@ public class TestDistributedLogSplitting {
         }
       }
     } finally {
-      ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection) ht.getConnection(), oldNg);
+      ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection)
+              TEST_UTIL.getConnection(), oldNg);
       ht.close();
       zkw.close();
     }
@@ -711,7 +713,7 @@ public class TestDistributedLogSplitting {
 
     List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
-    HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
+    Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
 
     List<HRegionInfo> regions = null;
     HRegionServer hrs = null;
@@ -901,7 +903,7 @@ public class TestDistributedLogSplitting {
 
     List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
-    HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
+    Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
     final SplitLogManager slm = master.getMasterFileSystem().splitLogManager;
 
     Set<HRegionInfo> regionSet = new HashSet<HRegionInfo>();
@@ -1399,7 +1401,7 @@ public class TestDistributedLogSplitting {
     LOG.info("testReadWriteSeqIdFiles");
     startCluster(2);
     final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
-    HTable ht = installTable(zkw, "table", "family", 10);
+    Table ht = installTable(zkw, "table", "family", 10);
     FileSystem fs = master.getMasterFileSystem().getFileSystem();
     Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf("table"));
     List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
@@ -1425,19 +1427,19 @@ public class TestDistributedLogSplitting {
     ht.close();
   } 
   
-  HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs) throws Exception {
+  Table installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs) throws Exception {
     return installTable(zkw, tname, fname, nrs, 0);
   }
 
-  HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs,
+  Table installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs,
       int existingRegions) throws Exception {
     // Create a table with regions
     TableName table = TableName.valueOf(tname);
     byte [] family = Bytes.toBytes(fname);
     LOG.info("Creating table with " + nrs + " regions");
-    HTable ht = TEST_UTIL.createMultiRegionTable(table, family, nrs);
+    Table ht = TEST_UTIL.createMultiRegionTable(table, family, nrs);
     int numRegions = -1;
-    try (RegionLocator r = ht.getRegionLocator()) {
+    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
       numRegions = r.getStartKeys().length;
     }
     assertEquals(nrs, numRegions);

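The Table interface exposes neither getConnection() nor getRegionLocator(), so both the nonce-generator injection and the region count now route through the shared Connection held by the test utility. A sketch of the locator idiom, assuming a running mini cluster behind TEST_UTIL:

    Connection conn = TEST_UTIL.getConnection();
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("table"))) {
      int numRegions = locator.getStartKeys().length;   // one start key per region
    }

The try-with-resources closes only the locator; the shared Connection stays open for the rest of the test.
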
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java
index abb6520..d2bb11a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -72,9 +73,8 @@ public class TestGetLastFlushedSequenceId {
   public void test() throws IOException, InterruptedException {
     testUtil.getHBaseAdmin().createNamespace(
       NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
-    HTable table = testUtil.createTable(tableName, families);
+    Table table = testUtil.createTable(tableName, families);
     table.put(new Put(Bytes.toBytes("k")).add(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
-    table.flushCommits();
     MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
     List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
     Region region = null;

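flushCommits() exists only on HTable; Table.put(Put) submits the mutation right away, so the explicit flush is simply dropped. Where a test still wants client-side write buffering, BufferedMutator is the replacement. A sketch under that assumption, reusing the same tableName and family:

    try (BufferedMutator mutator =
        testUtil.getConnection().getBufferedMutator(tableName)) {
      mutator.mutate(new Put(Bytes.toBytes("k"))
          .add(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
    }   // close() flushes anything still buffered
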
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 8028756..27ee31b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -83,7 +84,7 @@ public class TestMaster {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     HMaster m = cluster.getMaster();
 
-    try (HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {
+    try (Table ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {
       assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME,
         TableState.State.ENABLED));
       TEST_UTIL.loadTable(ht, FAMILYNAME, false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
index 20e0e54..3a4075b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -67,9 +68,9 @@ public class TestMasterRestartAfterDisablingTable {
     TableName table = TableName.valueOf("tableRestart");
     byte[] family = Bytes.toBytes("family");
     log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
-    HTable ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
+    Table ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
     int numRegions = -1;
-    try (RegionLocator r = ht.getRegionLocator()) {
+    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
       numRegions = r.getStartKeys().length;
     }
     numRegions += 1; // catalogs

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
index a09c5f2..7cea0df 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
@@ -66,9 +66,9 @@ public class TestMasterTransitions {
     TEST_UTIL.startMiniCluster(2);
     // Create a table of three families.  This will assign a region.
     TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);
-    HTable t = (HTable) TEST_UTIL.getConnection().getTable(TABLENAME);
+    Table t = TEST_UTIL.getConnection().getTable(TABLENAME);
     int countOfRegions = -1;
-    try (RegionLocator r = t.getRegionLocator()) {
+    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(TABLENAME)) {
       countOfRegions = r.getStartKeys().length;
     }
     TEST_UTIL.waitUntilAllRegionsAssigned(TABLENAME);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
index 9cbf680..69c5f89 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -50,7 +51,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
@@ -546,11 +549,10 @@ public class TestRegionPlacement {
     desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
     admin.createTable(desc, splitKeys);
 
-    HTable ht = (HTable) CONNECTION.getTable(tableName);
-    @SuppressWarnings("deprecation")
-    Map<HRegionInfo, ServerName> regions = ht.getRegionLocations();
-    assertEquals("Tried to create " + expectedRegions + " regions "
-        + "but only found " + regions.size(), expectedRegions, regions.size());
-    ht.close();
+    try (RegionLocator r = CONNECTION.getRegionLocator(tableName)) {
+      List<HRegionLocation> regions = r.getAllRegionLocations();
+      assertEquals("Tried to create " + expectedRegions + " regions "
+          + "but only found " + regions.size(), expectedRegions, regions.size());
+    }
   }
 }

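The deprecated HTable.getRegionLocations() map gives way to RegionLocator.getAllRegionLocations(), a List<HRegionLocation> that carries both the region and its hosting server. A sketch of walking placements through the locator, assuming the open CONNECTION above:

    try (RegionLocator locator = CONNECTION.getRegionLocator(tableName)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        HRegionInfo hri = loc.getRegionInfo();
        ServerName server = loc.getServerName();
        // ... compare hri's placement on server against the favored-nodes plan ...
      }
    }
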
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
index 72a0e0c..3d12c12 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -78,9 +79,9 @@ public class  TestRollingRestart {
     TableName table = TableName.valueOf("tableRestart");
     byte [] family = Bytes.toBytes("family");
     log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
-    HTable ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
+    Table ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
     int numRegions = -1;
-    try (RegionLocator r = ht.getRegionLocator()) {
+    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
       numRegions = r.getStartKeys().length;
     }
     numRegions += 1; // catalogs

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
index a2f08ab..3c070e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
@@ -288,12 +288,12 @@ public class TestSnapshotFromMaster {
     UTIL.deleteTable(TABLE_NAME);
     HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
     htd.setCompactionEnabled(false);
-    UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
+    UTIL.createTable(htd, new byte[][] { TEST_FAM }, null);
     // load the table (creates 4 hfiles)
     UTIL.loadTable(UTIL.getConnection().getTable(TABLE_NAME), TEST_FAM);
     UTIL.flush(TABLE_NAME);
     // Put some more data into the table so for sure we get more storefiles.
-    UTIL.loadTable((HTable) UTIL.getConnection().getTable(TABLE_NAME), TEST_FAM);
+    UTIL.loadTable(UTIL.getConnection().getTable(TABLE_NAME), TEST_FAM);
 
     // disable the table so we can take a snapshot
     admin.disableTable(TABLE_NAME);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
index 50ab46d..52bfba8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
@@ -446,7 +446,7 @@ public class TestNamespaceAuditor {
     // This call will pass.
     ADMIN.createTable(tableDescOne);
     Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
-    HTable htable = (HTable)connection.getTable(tableOne);
+    Table htable = connection.getTable(tableOne);
     UTIL.loadNumericRows(htable, Bytes.toBytes("info"), 1, 1000);
     ADMIN.flush(tableOne);
     stateInfo = getNamespaceState(nsp1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
index d19906b..6d2dd3e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -66,7 +67,7 @@ public class TestQuotaThrottle {
   };
 
   private static ManualEnvironmentEdge envEdge;
-  private static HTable[] tables;
+  private static Table[] tables;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -81,7 +82,7 @@ public class TestQuotaThrottle {
     TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
     QuotaCache.TEST_FORCE_REFRESH = true;
 
-    tables = new HTable[TABLE_NAMES.length];
+    tables = new Table[TABLE_NAMES.length];
     for (int i = 0; i < TABLE_NAMES.length; ++i) {
       tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
     }
@@ -505,13 +506,13 @@ public class TestQuotaThrottle {
     assertEquals(30, doGets(30, tables[1]));
   }
 
-  private int doPuts(int maxOps, final HTable... tables) throws Exception {
+  private int doPuts(int maxOps, final Table... tables) throws Exception {
     int count = 0;
     try {
       while (count < maxOps) {
         Put put = new Put(Bytes.toBytes("row-" + count));
         put.add(FAMILY, QUALIFIER, Bytes.toBytes("data-" + count));
-        for (final HTable table: tables) {
+        for (final Table table: tables) {
           table.put(put);
         }
         count += tables.length;
@@ -527,12 +528,12 @@ public class TestQuotaThrottle {
     return count;
   }
 
-  private long doGets(int maxOps, final HTable... tables) throws Exception {
+  private long doGets(int maxOps, final Table... tables) throws Exception {
     int count = 0;
     try {
       while (count < maxOps) {
         Get get = new Get(Bytes.toBytes("row-" + count));
-        for (final HTable table: tables) {
+        for (final Table table: tables) {
           table.get(get);
         }
         count += tables.length;

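Since put and get are declared on the Table interface, the throttle helpers can take Table varargs and stay indifferent to the runtime type. An illustrative use, with made-up table names:

    Table t1 = TEST_UTIL.createTable(TableName.valueOf("throttleA"), Bytes.toBytes("cf"));
    Table t2 = TEST_UTIL.createTable(TableName.valueOf("throttleB"), Bytes.toBytes("cf"));
    Put put = new Put(Bytes.toBytes("row-0"));
    put.add(Bytes.toBytes("cf"), QUALIFIER, Bytes.toBytes("data-0"));
    for (Table t : new Table[] { t1, t2 }) {
      t.put(put);   // same call whether t is an HTable or any other Table
    }
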
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b58530/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index be43950..cd87344 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -94,7 +95,7 @@ public class TestEndToEndSplitTransaction {
   public void testMasterOpsWhileSplitting() throws Exception {
     TableName tableName = TableName.valueOf("TestSplit");
     byte[] familyName = Bytes.toBytes("fam");
-    try (HTable ht = TEST_UTIL.createTable(tableName, familyName)) {
+    try (Table ht = TEST_UTIL.createTable(tableName, familyName)) {
       TEST_UTIL.loadTable(ht, familyName, false);
     }
     HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
@@ -318,19 +319,23 @@ public class TestEndToEndSplitTransaction {
 
     /** verify region boundaries obtained from HTable.getStartEndKeys() */
     void verifyRegionsUsingHTable() throws IOException {
-      HTable table = null;
+      Table table = null;
       try {
         //HTable.getStartEndKeys()
-        table = (HTable) connection.getTable(tableName);
-        Pair<byte[][], byte[][]> keys = table.getRegionLocator().getStartEndKeys();
-        verifyStartEndKeys(keys);
-
-        //HTable.getRegionsInfo()
-        Set<HRegionInfo> regions = new TreeSet<HRegionInfo>();
-        for (HRegionLocation loc : table.getRegionLocator().getAllRegionLocations()) {
-          regions.add(loc.getRegionInfo());
+        table = connection.getTable(tableName);
+
+        try(RegionLocator rl = connection.getRegionLocator(tableName)) {
+          Pair<byte[][], byte[][]> keys = rl.getStartEndKeys();
+          verifyStartEndKeys(keys);
+
+          // regions via RegionLocator.getAllRegionLocations() (was HTable.getRegionsInfo())
+          Set<HRegionInfo> regions = new TreeSet<HRegionInfo>();
+          for (HRegionLocation loc : rl.getAllRegionLocations()) {
+            regions.add(loc.getRegionInfo());
+          }
+          verifyTableRegions(regions);
         }
-        verifyTableRegions(regions);
+
       } finally {
         IOUtils.closeQuietly(table);
       }
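Here the locator gets its own try-with-resources while the Table keeps the pre-existing IOUtils.closeQuietly cleanup. The two handles could equally share a single try; a sketch with the same connection and tableName:

    try (Table table = connection.getTable(tableName);
         RegionLocator rl = connection.getRegionLocator(tableName)) {
      verifyStartEndKeys(rl.getStartEndKeys());
      Set<HRegionInfo> regions = new TreeSet<HRegionInfo>();
      for (HRegionLocation loc : rl.getAllRegionLocations()) {
        regions.add(loc.getRegionInfo());
      }
      verifyTableRegions(regions);
    }   // both close in reverse order; closeQuietly becomes unnecessary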