Posted to commits@hbase.apache.org by ch...@apache.org on 2017/08/24 05:06:43 UTC

[2/8] hbase git commit: Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor". Wrong author information. This reverts commit b03348630c145aa6cc29f0f295442c6deb28a28e.
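
For context, HBASE-18503 (reverted here because it carried wrong author
information) had moved ***Util and Master from the deprecated
HTableDescriptor/HColumnDescriptor classes to the TableDescriptor/
ColumnFamilyDescriptor interfaces and their builders. A minimal sketch of the
two styles, using a hypothetical table "t" and family "cf" that are not taken
from the patch, and assuming the usual imports from org.apache.hadoop.hbase
and org.apache.hadoop.hbase.client:

    // Old style, restored by this revert: mutable descriptor classes.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
    htd.addFamily(new HColumnDescriptor("cf"));

    // New style from HBASE-18503: immutable descriptors assembled via builders.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();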

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 979a351..fce4eaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -89,7 +89,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   protected final SnapshotManifest snapshotManifest;
   protected final SnapshotManager snapshotManager;
 
-  protected TableDescriptor htd;
+  protected HTableDescriptor htd;
 
   /**
    * @param snapshot descriptor of the snapshot to take
@@ -124,12 +124,12 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
   }
 
-  private TableDescriptor loadTableDescriptor()
+  private HTableDescriptor loadTableDescriptor()
       throws FileNotFoundException, IOException {
-    TableDescriptor htd =
+    HTableDescriptor htd =
       this.master.getTableDescriptors().get(snapshotTable);
     if (htd == null) {
-      throw new IOException("TableDescriptor missing for " + snapshotTable);
+      throw new IOException("HTableDescriptor missing for " + snapshotTable);
     }
     return htd;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index b1d1415..d4a54bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -59,7 +59,7 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
    * @param tableName The current table name.
    * @param family The current family.
    */
-  public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor family) throws IOException {
+  public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) throws IOException {
     Configuration conf = getConf();
     TableName tn = TableName.valueOf(tableName);
     FileSystem fs = FileSystem.get(conf);
@@ -98,8 +98,8 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
     Connection connection = ConnectionFactory.createConnection(getConf());
     Admin admin = connection.getAdmin();
     try {
-      TableDescriptor htd = admin.listTableDescriptor(tn);
-      ColumnFamilyDescriptor family = htd.getColumnFamily(Bytes.toBytes(familyName));
+      HTableDescriptor htd = admin.getTableDescriptor(tn);
+      HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
       if (family == null || !family.isMobEnabled()) {
         throw new IOException("Column family " + familyName + " is not a MOB column family");
       }
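
A side-by-side sketch of the Admin lookup touched above, both as restored by
this revert and as it read under HBASE-18503 (the table and family names are
placeholders, not from the patch; admin is an open Admin instance):

    // Restored (deprecated-classes) path:
    HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf("t"));
    HColumnDescriptor family = htd.getFamily(Bytes.toBytes("cf"));

    // Reverted (interface-based) path from HBASE-18503:
    TableDescriptor td = admin.listTableDescriptor(TableName.valueOf("t"));
    ColumnFamilyDescriptor cf = td.getColumnFamily(Bytes.toBytes("cf"));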

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 4273098..80bda28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -44,8 +44,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
@@ -55,7 +57,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -285,7 +286,7 @@ public final class MobUtils {
    * @throws IOException
    */
   public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableName tableName,
-      ColumnFamilyDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
+      HColumnDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
       throws IOException {
     long timeToLive = columnDescriptor.getTimeToLive();
     if (Integer.MAX_VALUE == timeToLive) {
@@ -518,7 +519,7 @@ public final class MobUtils {
    * @throws IOException
    */
   public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
-      ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
+      HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
       Compression.Algorithm compression, String startKey, CacheConfig cacheConfig,
       Encryption.Context cryptoContext, boolean isCompaction)
       throws IOException {
@@ -542,7 +543,7 @@ public final class MobUtils {
    * @throws IOException
    */
   public static StoreFileWriter createRefFileWriter(Configuration conf, FileSystem fs,
-    ColumnFamilyDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
+    HColumnDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
     Encryption.Context cryptoContext, boolean isCompaction)
     throws IOException {
     return createWriter(conf, fs, family,
@@ -569,7 +570,7 @@ public final class MobUtils {
    * @throws IOException
    */
   public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
-      ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
+      HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
       Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
       Encryption.Context cryptoContext, boolean isCompaction)
       throws IOException {
@@ -595,7 +596,7 @@ public final class MobUtils {
    * @throws IOException
    */
   public static StoreFileWriter createDelFileWriter(Configuration conf, FileSystem fs,
-      ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
+      HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
       Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
       Encryption.Context cryptoContext)
       throws IOException {
@@ -622,7 +623,7 @@ public final class MobUtils {
    * @throws IOException
    */
   public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
-                                             ColumnFamilyDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
+      HColumnDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
       Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext,
       boolean isCompaction)
       throws IOException {
@@ -796,7 +797,7 @@ public final class MobUtils {
    * @param allFiles Whether add all mob files into the compaction.
    */
   public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
-                                     ColumnFamilyDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
+    HColumnDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
       throws IOException {
     String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
         PartitionedMobCompactor.class.getName());
@@ -804,7 +805,7 @@ public final class MobUtils {
     MobCompactor compactor = null;
     try {
       compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
-        Configuration.class, FileSystem.class, TableName.class, ColumnFamilyDescriptor.class,
+        Configuration.class, FileSystem.class, TableName.class, HColumnDescriptor.class,
         ExecutorService.class }, new Object[] { conf, fs, tableName, hcd, pool });
     } catch (Exception e) {
       throw new IOException("Unable to load configured mob file compactor '" + className + "'", e);
@@ -856,9 +857,9 @@ public final class MobUtils {
    * @param htd The current table descriptor.
    * @return Whether this table has mob-enabled columns.
    */
-  public static boolean hasMobColumns(TableDescriptor htd) {
-    ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
-    for (ColumnFamilyDescriptor hcd : hcds) {
+  public static boolean hasMobColumns(HTableDescriptor htd) {
+    HColumnDescriptor[] hcds = htd.getColumnFamilies();
+    for (HColumnDescriptor hcd : hcds) {
       if (hcd.isMobEnabled()) {
         return true;
       }
@@ -898,7 +899,7 @@ public final class MobUtils {
    * @param fileDate The date string parsed from the mob file name.
    * @return True if the mob file is expired.
    */
-  public static boolean isMobFileExpired(ColumnFamilyDescriptor column, long current, String fileDate) {
+  public static boolean isMobFileExpired(HColumnDescriptor column, long current, String fileDate) {
     if (column.getMinVersions() > 0) {
       return false;
     }
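
The MobUtils hunks above swap only the descriptor type used in the MOB checks;
a condensed sketch of hasMobColumns() in both forms (htd and td stand for a
table descriptor already in scope, as in the patch):

    // Restored form over HColumnDescriptor:
    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
      if (hcd.isMobEnabled()) return true;
    }

    // Reverted HBASE-18503 form over ColumnFamilyDescriptor:
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      if (cfd.isMobEnabled()) return true;
    }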

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
index 7ebdbc7..77de0cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
@@ -27,9 +27,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -42,14 +42,14 @@ public abstract class MobCompactor {
   protected FileSystem fs;
   protected Configuration conf;
   protected TableName tableName;
-  protected ColumnFamilyDescriptor column;
+  protected HColumnDescriptor column;
 
   protected Path mobTableDir;
   protected Path mobFamilyDir;
   protected ExecutorService pool;
 
   public MobCompactor(Configuration conf, FileSystem fs, TableName tableName,
-                      ColumnFamilyDescriptor column, ExecutorService pool) {
+    HColumnDescriptor column, ExecutorService pool) {
     this.conf = conf;
     this.fs = fs;
     this.tableName = tableName;

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index da664cd..d37292c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -45,13 +45,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
@@ -109,7 +109,7 @@ public class PartitionedMobCompactor extends MobCompactor {
   private Encryption.Context cryptoContext = Encryption.Context.NONE;
 
   public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
-                                 ColumnFamilyDescriptor column, ExecutorService pool) throws IOException {
+    HColumnDescriptor column, ExecutorService pool) throws IOException {
     super(conf, fs, tableName, column, pool);
     mergeableSize = conf.getLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,
       MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD);

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index eb9a5f7..e1d2ea1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.mapreduce.JobUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -109,13 +109,13 @@ public class CompactionTool extends Configured implements Tool {
       if (isFamilyDir(fs, path)) {
         Path regionDir = path.getParent();
         Path tableDir = regionDir.getParent();
-        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
         compactStoreFiles(tableDir, htd, hri,
             path.getName(), compactOnce, major);
       } else if (isRegionDir(fs, path)) {
         Path tableDir = path.getParent();
-        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         compactRegion(tableDir, htd, path, compactOnce, major);
       } else if (isTableDir(fs, path)) {
         compactTable(path, compactOnce, major);
@@ -127,13 +127,13 @@ public class CompactionTool extends Configured implements Tool {
 
     private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
         throws IOException {
-      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
       for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
         compactRegion(tableDir, htd, regionDir, compactOnce, major);
       }
     }
 
-    private void compactRegion(final Path tableDir, final TableDescriptor htd,
+    private void compactRegion(final Path tableDir, final HTableDescriptor htd,
         final Path regionDir, final boolean compactOnce, final boolean major)
         throws IOException {
       HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
@@ -147,7 +147,7 @@ public class CompactionTool extends Configured implements Tool {
      * If the compact once flag is not specified, execute the compaction until
      * no more compactions are needed. Uses the Configuration settings provided.
      */
-    private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
+    private void compactStoreFiles(final Path tableDir, final HTableDescriptor htd,
         final HRegionInfo hri, final String familyName, final boolean compactOnce,
         final boolean major) throws IOException {
       HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
@@ -177,7 +177,7 @@ public class CompactionTool extends Configured implements Tool {
      * the store dir to compact as source.
      */
     private static HStore getStore(final Configuration conf, final FileSystem fs,
-        final Path tableDir, final TableDescriptor htd, final HRegionInfo hri,
+        final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
         final String familyName, final Path tempDir) throws IOException {
       HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
         @Override
@@ -186,7 +186,7 @@ public class CompactionTool extends Configured implements Tool {
         }
       };
       HRegion region = new HRegion(regionFs, null, conf, htd, null);
-      return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf);
+      return new HStore(region, htd.getFamily(Bytes.toBytes(familyName)), conf);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 74a2998..9cb1316 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -41,12 +41,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -340,8 +340,8 @@ public class HRegionFileSystem {
    * @return true if region has reference file
    * @throws IOException
    */
-  public boolean hasReferences(final TableDescriptor htd) throws IOException {
-    for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
+  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
+    for (HColumnDescriptor family : htd.getFamilies()) {
       if (hasReferences(family.getNameAsString())) {
         return true;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index ae2f7dd..28b7a43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -50,7 +50,6 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Function;
 
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
@@ -90,7 +89,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.client.locking.LockServiceClient;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
@@ -702,11 +700,7 @@ public class HRegionServer extends HasThread implements
 
   protected TableDescriptors getFsTableDescriptors() throws IOException {
     return new FSTableDescriptors(this.conf,
-      this.fs, this.rootDir, !canUpdateTableDescriptor(), false, getMetaTableObserver());
-  }
-
-  protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
-    return null;
+      this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
   }
 
   protected void setInitLatch(CountDownLatch latch) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 7fc025a..9a25275 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MultiActionResultTooLarge;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ServerName;
@@ -76,7 +77,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.VersionInfoUtil;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
@@ -1859,7 +1859,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
     OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
     final int regionCount = request.getOpenInfoCount();
-    final Map<TableName, TableDescriptor> htds = new HashMap<>(regionCount);
+    final Map<TableName, HTableDescriptor> htds = new HashMap<>(regionCount);
     final boolean isBulkAssign = regionCount > 1;
     try {
       checkOpen();
@@ -1898,7 +1898,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
     for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
       final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
-      TableDescriptor htd;
+      HTableDescriptor htd;
       try {
         String encodedName = region.getEncodedName();
         byte[] encodedNameBytes = region.getEncodedNameAsBytes();
@@ -2020,7 +2020,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
     RegionInfo regionInfo = request.getRegionInfo();
     final HRegionInfo region = HRegionInfo.convert(regionInfo);
-    TableDescriptor htd;
+    HTableDescriptor htd;
     WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance();
 
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
index 6913ecd..e49b164 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.regionserver.handler;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 
@@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 @InterfaceAudience.Private
 public class OpenMetaHandler extends OpenRegionHandler {
   public OpenMetaHandler(final Server server,
-                         final RegionServerServices rsServices, HRegionInfo regionInfo,
-                         final TableDescriptor htd, long masterSystemTime) {
+      final RegionServerServices rsServices, HRegionInfo regionInfo,
+      final HTableDescriptor htd, long masterSystemTime) {
     super(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_META);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
index ced9ef2..83d4d3f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java
@@ -19,10 +19,11 @@
 package org.apache.hadoop.hbase.regionserver.handler;
 
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 
 /**
@@ -33,7 +34,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 @InterfaceAudience.Private
 public class OpenPriorityRegionHandler extends OpenRegionHandler {
   public OpenPriorityRegionHandler(Server server, RegionServerServices rsServices,
-                                   HRegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) {
+      HRegionInfo regionInfo, HTableDescriptor htd, long masterSystemTime) {
     super(server, rsServices, regionInfo, htd, masterSystemTime,
        EventType.M_RS_OPEN_PRIORITY_REGION);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index bbb084c..8369100 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -25,8 +25,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
@@ -48,18 +48,18 @@ public class OpenRegionHandler extends EventHandler {
   protected final RegionServerServices rsServices;
 
   private final HRegionInfo regionInfo;
-  private final TableDescriptor htd;
+  private final HTableDescriptor htd;
   private final long masterSystemTime;
 
   public OpenRegionHandler(final Server server,
       final RegionServerServices rsServices, HRegionInfo regionInfo,
-      TableDescriptor htd, long masterSystemTime) {
+      HTableDescriptor htd, long masterSystemTime) {
     this(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_REGION);
   }
 
   protected OpenRegionHandler(final Server server,
-                              final RegionServerServices rsServices, final HRegionInfo regionInfo,
-                              final TableDescriptor htd, long masterSystemTime, EventType eventType) {
+      final RegionServerServices rsServices, final HRegionInfo regionInfo,
+      final HTableDescriptor htd, long masterSystemTime, EventType eventType) {
     super(server, eventType);
     this.rsServices = rsServices;
     this.regionInfo = regionInfo;

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index 4ea0434..9f600da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
@@ -54,7 +55,6 @@ import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RetryingCallable;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
@@ -399,7 +399,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
       if (requiresReplication == null) {
         // check if the table requires memstore replication
         // some unit-test drop the table, so we should do a bypass check and always replicate.
-        TableDescriptor htd = tableDescriptors.get(tableName);
+        HTableDescriptor htd = tableDescriptors.get(tableName);
         requiresReplication = htd == null || htd.hasRegionMemstoreReplication();
         memstoreReplicationEnabled.put(tableName, requiresReplication);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index cae4c7e..9875ac0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -34,7 +34,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ThreadPoolExecutor;
 
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,6 +43,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -125,7 +125,7 @@ public class RestoreSnapshotHelper {
   private final SnapshotDescription snapshotDesc;
   private final TableName snapshotTable;
 
-  private final TableDescriptor tableDesc;
+  private final HTableDescriptor tableDesc;
   private final Path rootDir;
   private final Path tableDir;
 
@@ -136,7 +136,7 @@ public class RestoreSnapshotHelper {
   public RestoreSnapshotHelper(final Configuration conf,
       final FileSystem fs,
       final SnapshotManifest manifest,
-      final TableDescriptor tableDescriptor,
+      final HTableDescriptor tableDescriptor,
       final Path rootDir,
       final ForeignExceptionDispatcher monitor,
       final MonitoredTask status) {
@@ -146,7 +146,7 @@ public class RestoreSnapshotHelper {
   public RestoreSnapshotHelper(final Configuration conf,
       final FileSystem fs,
       final SnapshotManifest manifest,
-      final TableDescriptor tableDescriptor,
+      final HTableDescriptor tableDescriptor,
       final Path rootDir,
       final ForeignExceptionDispatcher monitor,
       final MonitoredTask status,
@@ -265,18 +265,18 @@ public class RestoreSnapshotHelper {
    */
   public static class RestoreMetaChanges {
     private final Map<String, Pair<String, String> > parentsMap;
-    private final TableDescriptor htd;
+    private final HTableDescriptor htd;
 
     private List<HRegionInfo> regionsToRestore = null;
     private List<HRegionInfo> regionsToRemove = null;
     private List<HRegionInfo> regionsToAdd = null;
 
-    public RestoreMetaChanges(TableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
+    public RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
       this.parentsMap = parentsMap;
       this.htd = htd;
     }
 
-    public TableDescriptor getTableDescriptor() {
+    public HTableDescriptor getTableDescriptor() {
       return htd;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 32cdabf..f70fe9e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -36,10 +36,10 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -76,7 +76,7 @@ public final class SnapshotManifest {
 
   private List<SnapshotRegionManifest> regionManifests;
   private SnapshotDescription desc;
-  private TableDescriptor htd;
+  private HTableDescriptor htd;
 
   private final ForeignExceptionSnare monitor;
   private final Configuration conf;
@@ -119,7 +119,7 @@ public final class SnapshotManifest {
   /**
    * Return a SnapshotManifest instance with the information already loaded in-memory.
    *    SnapshotManifest manifest = SnapshotManifest.open(...)
-   *    TableDescriptor htd = manifest.getTableDescriptor()
+   *    HTableDescriptor htd = manifest.getTableDescriptor()
    *    for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests())
    *      hri = regionManifest.getRegionInfo()
    *      for (regionManifest.getFamilyFiles())
@@ -136,7 +136,7 @@ public final class SnapshotManifest {
   /**
    * Add the table descriptor to the snapshot manifest
    */
-  public void addTableDescriptor(final TableDescriptor htd) throws IOException {
+  public void addTableDescriptor(final HTableDescriptor htd) throws IOException {
     this.htd = htd;
   }
 
@@ -182,7 +182,7 @@ public final class SnapshotManifest {
     LOG.debug("Creating references for mob files");
 
     Path mobRegionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
-    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
       // 2.1. build the snapshot reference for the store if it's a mob store
       if (!hcd.isMobEnabled()) {
         continue;
@@ -377,7 +377,7 @@ public final class SnapshotManifest {
       case SnapshotManifestV2.DESCRIPTOR_VERSION: {
         SnapshotDataManifest dataManifest = readDataManifest();
         if (dataManifest != null) {
-          htd = ProtobufUtil.toTableDescriptor(dataManifest.getTableSchema());
+          htd = ProtobufUtil.convertToHTableDesc(dataManifest.getTableSchema());
           regionManifests = dataManifest.getRegionManifestsList();
         } else {
           // Compatibility, load the v1 regions
@@ -429,7 +429,7 @@ public final class SnapshotManifest {
   /**
    * Get the table descriptor from the Snapshot
    */
-  public TableDescriptor getTableDescriptor() {
+  public HTableDescriptor getTableDescriptor() {
     return this.htd;
   }
 
@@ -485,7 +485,7 @@ public final class SnapshotManifest {
     }
 
     SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder();
-    dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(htd));
+    dataManifestBuilder.setTableSchema(ProtobufUtil.convertToTableSchema(htd));
 
     if (v1Regions != null && v1Regions.size() > 0) {
       dataManifestBuilder.addAllRegionManifests(v1Regions);

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index a73883b..eb6b766 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -24,10 +24,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Ints;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -39,19 +40,17 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Ints;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableInfoMissingException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 
 /**
  * Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -80,14 +79,10 @@ public class FSTableDescriptors implements TableDescriptors {
   private volatile boolean usecache;
   private volatile boolean fsvisited;
 
-  @VisibleForTesting
-  long cachehits = 0;
-  @VisibleForTesting
-  long invocations = 0;
+  @VisibleForTesting long cachehits = 0;
+  @VisibleForTesting long invocations = 0;
 
-  /**
-   * The file name prefix used to store HTD in HDFS
-   */
+  /** The file name prefix used to store HTD in HDFS  */
   static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
   static final String TABLEINFO_DIR = ".tabledesc";
   static final String TMP_DIR = ".tmp";
@@ -95,12 +90,12 @@ public class FSTableDescriptors implements TableDescriptors {
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();
+  private final Map<TableName, HTableDescriptor> cache = new ConcurrentHashMap<>();
 
   /**
    * Table descriptor for <code>hbase:meta</code> catalog table
    */
-  private final TableDescriptor metaTableDescriptor;
+  private final HTableDescriptor metaTableDescriptor;
 
   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
@@ -112,112 +107,91 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
   public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir)
-          throws IOException {
+  throws IOException {
     this(conf, fs, rootdir, false, true);
   }
 
   /**
    * @param fsreadonly True if we are read-only when it comes to filesystem
-   *                   operations; i.e. on remove, we do not do delete in fs.
-   */
-  public FSTableDescriptors(final Configuration conf, final FileSystem fs,
-                            final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
-    this(conf, fs, rootdir, fsreadonly, usecache, null);
-  }
-
-  /**
-   * @param fsreadonly True if we are read-only when it comes to filesystem
-   *                   operations; i.e. on remove, we do not do delete in fs.
-   * @param metaObserver Used by HMaster. It need to modify the META_REPLICAS_NUM for meta table descriptor.
-   *                     see HMaster#finishActiveMasterInitialization
-   *                     TODO: This is a workaround. Should remove this ugly code...
+   * operations; i.e. on remove, we do not do delete in fs.
    */
   public FSTableDescriptors(final Configuration conf, final FileSystem fs,
-                            final Path rootdir, final boolean fsreadonly, final boolean usecache,
-                            Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
+    final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
+    super();
     this.fs = fs;
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
-          : metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
-  }
 
-  @VisibleForTesting
-  public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
-    return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
-            .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
-                    .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
-                            HConstants.DEFAULT_HBASE_META_VERSIONS))
-                    .setInMemory(true)
-                    .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
-                            HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
-                    .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-                    .setBloomFilterType(BloomType.NONE)
-                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
-                    // e.g. if using CombinedBlockCache (BucketCache).
-                    .setCacheDataInL1(true)
-                    .build())
-            .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY)
-                    .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
-                            HConstants.DEFAULT_HBASE_META_VERSIONS))
-                    .setInMemory(true)
-                    .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
-                            HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
-                    .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-                    .setBloomFilterType(BloomType.NONE)
-                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
-                    // e.g. if using CombinedBlockCache (BucketCache).
-                    .setCacheDataInL1(true)
-                    .build())
-            .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_POSITION_FAMILY)
-                    .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
-                            HConstants.DEFAULT_HBASE_META_VERSIONS))
-                    .setInMemory(true)
-                    .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
-                            HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
-                    .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-                    .setBloomFilterType(BloomType.NONE)
-                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
-                    // e.g. if using CombinedBlockCache (BucketCache).
-                    .setCacheDataInL1(true)
-                    .build())
-            .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_META_FAMILY)
-                    .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
-                            HConstants.DEFAULT_HBASE_META_VERSIONS))
-                    .setInMemory(true)
-                    .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
-                            HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
-                    .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-                    .setBloomFilterType(BloomType.NONE)
-                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
-                    // e.g. if using CombinedBlockCache (BucketCache).
-                    .setCacheDataInL1(true)
-                    .build())
-            .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY)
-                    // Ten is arbitrary number.  Keep versions to help debugging.
-                    .setMaxVersions(10)
-                    .setInMemory(true)
-                    .setBlocksize(8 * 1024)
-                    .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-                    // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
-                    .setBloomFilterType(BloomType.NONE)
-                    // Enable cache of data blocks in L1 if more than one caching tier deployed:
-                    // e.g. if using CombinedBlockCache (BucketCache).
-                    .setCacheDataInL1(true)
-                    .build())
-            .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
-                    null, Coprocessor.PRIORITY_SYSTEM, null);
+    this.metaTableDescriptor = createMetaTableDescriptor(conf);
   }
 
   @VisibleForTesting
-  public static TableDescriptor createMetaTableDescriptor(final Configuration conf)
+  public static HTableDescriptor createMetaTableDescriptor(final Configuration conf)
       throws IOException {
-    return createMetaTableDescriptorBuilder(conf).build();
+    return new HTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+          .addColumnFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                    HConstants.DEFAULT_HBASE_META_VERSIONS))
+            .setInMemory(true)
+            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            // Enable cache of data blocks in L1 if more than one caching tier deployed:
+            // e.g. if using CombinedBlockCache (BucketCache).
+            .setCacheDataInL1(true))
+          .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
+            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                    HConstants.DEFAULT_HBASE_META_VERSIONS))
+            .setInMemory(true)
+            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            // Enable cache of data blocks in L1 if more than one caching tier deployed:
+            // e.g. if using CombinedBlockCache (BucketCache).
+            .setCacheDataInL1(true))
+          .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
+            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                    HConstants.DEFAULT_HBASE_META_VERSIONS))
+            .setInMemory(true)
+            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            // Enable cache of data blocks in L1 if more than one caching tier deployed:
+            // e.g. if using CombinedBlockCache (BucketCache).
+            .setCacheDataInL1(true))
+          .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
+            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                    HConstants.DEFAULT_HBASE_META_VERSIONS))
+            .setInMemory(true)
+            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            // Enable cache of data blocks in L1 if more than one caching tier deployed:
+            // e.g. if using CombinedBlockCache (BucketCache).
+            .setCacheDataInL1(true))
+          .addColumnFamily(new HColumnDescriptor(HConstants.TABLE_FAMILY)
+            // Ten is arbitrary number.  Keep versions to help debugging.
+            .setMaxVersions(10)
+            .setInMemory(true)
+            .setBlocksize(8 * 1024)
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            // Enable cache of data blocks in L1 if more than one caching tier deployed:
+            // e.g. if using CombinedBlockCache (BucketCache).
+            .setCacheDataInL1(true))
+          .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
+            null, Coprocessor.PRIORITY_SYSTEM, null)
+          .build());
   }
 
   @Override
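
The hunk above restores the pre-builder construction style: descriptors are assembled from HTableDescriptor and HColumnDescriptor, with the builder output wrapped back into the legacy type. A minimal sketch of that restored style, using an illustrative table and family name rather than anything from the patch:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class LegacyDescriptorSketch {
  // Hypothetical example; "example" and "info" are placeholder names.
  static HTableDescriptor exampleDescriptor() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    htd.addFamily(new HColumnDescriptor("info")
        .setMaxVersions(3)        // keep a few versions around for debugging
        .setInMemory(true)        // favour the in-memory portion of the block cache
        .setBlocksize(8 * 1024)); // small block size suits point lookups
    return htd;
  }
}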
@@ -245,7 +219,7 @@ public class FSTableDescriptors implements TableDescriptors {
    */
   @Override
   @Nullable
-  public TableDescriptor get(final TableName tablename)
+  public HTableDescriptor get(final TableName tablename)
   throws IOException {
     invocations++;
     if (TableName.META_TABLE_NAME.equals(tablename)) {
@@ -260,13 +234,13 @@ public class FSTableDescriptors implements TableDescriptors {
 
     if (usecache) {
       // Look in cache of descriptors.
-      TableDescriptor cachedtdm = this.cache.get(tablename);
+      HTableDescriptor cachedtdm = this.cache.get(tablename);
       if (cachedtdm != null) {
         cachehits++;
         return cachedtdm;
       }
     }
-    TableDescriptor tdmt = null;
+    HTableDescriptor tdmt = null;
     try {
       tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
     } catch (NullPointerException e) {
@@ -290,21 +264,21 @@ public class FSTableDescriptors implements TableDescriptors {
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, TableDescriptor> getAllDescriptors()
+  public Map<String, HTableDescriptor> getAllDescriptors()
   throws IOException {
-    Map<String, TableDescriptor> tds = new TreeMap<>();
+    Map<String, HTableDescriptor> tds = new TreeMap<>();
 
     if (fsvisited && usecache) {
-      for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
+      for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
         tds.put(entry.getKey().toString(), entry.getValue());
       }
       // add hbase:meta to the response
-      tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
+      tds.put(this.metaTableDescriptor.getNameAsString(), metaTableDescriptor);
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
-        TableDescriptor htd = null;
+        HTableDescriptor htd = null;
         try {
           htd = get(FSUtils.getTableName(d));
         } catch (FileNotFoundException fnfe) {
@@ -327,10 +301,10 @@ public class FSTableDescriptors implements TableDescriptors {
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, TableDescriptor> getAll() throws IOException {
-    Map<String, TableDescriptor> htds = new TreeMap<>();
-    Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
-    for (Map.Entry<String, TableDescriptor> entry : allDescriptors
+  public Map<String, HTableDescriptor> getAll() throws IOException {
+    Map<String, HTableDescriptor> htds = new TreeMap<>();
+    Map<String, HTableDescriptor> allDescriptors = getAllDescriptors();
+    for (Map.Entry<String, HTableDescriptor> entry : allDescriptors
         .entrySet()) {
       htds.put(entry.getKey(), entry.getValue());
     }
@@ -342,13 +316,13 @@ public class FSTableDescriptors implements TableDescriptors {
     * @see #get(org.apache.hadoop.hbase.TableName)
     */
   @Override
-  public Map<String, TableDescriptor> getByNamespace(String name)
+  public Map<String, HTableDescriptor> getByNamespace(String name)
   throws IOException {
-    Map<String, TableDescriptor> htds = new TreeMap<>();
+    Map<String, HTableDescriptor> htds = new TreeMap<>();
     List<Path> tableDirs =
         FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
     for (Path d: tableDirs) {
-      TableDescriptor htd = null;
+      HTableDescriptor htd = null;
       try {
         htd = get(FSUtils.getTableName(d));
       } catch (FileNotFoundException fnfe) {
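
After the revert, getAll(), getAllDescriptors() and getByNamespace() hand back HTableDescriptor values again. A rough usage sketch, assuming an hbase-site.xml on the classpath that points at the cluster filesystem:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class ListDescriptorsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Map keyed by table name as a String; hbase:meta is included in the response.
    Map<String, HTableDescriptor> all = new FSTableDescriptors(conf).getAll();
    for (Map.Entry<String, HTableDescriptor> e : all.entrySet()) {
      System.out.println(e.getKey() + " families: " + e.getValue().getFamilies().size());
    }
  }
}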
@@ -366,7 +340,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * and updates the local cache with it.
    */
   @Override
-  public void add(TableDescriptor htd) throws IOException {
+  public void add(HTableDescriptor htd) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
@@ -377,7 +351,7 @@ public class FSTableDescriptors implements TableDescriptors {
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
       throw new NotImplementedException(
           "Cannot add a table descriptor for a reserved subdirectory name: "
-              + htd.getTableName().getNameAsString());
+              + htd.getNameAsString());
     }
     updateTableDescriptor(htd);
   }
@@ -388,7 +362,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * from the FileSystem.
    */
   @Override
-  public TableDescriptor remove(final TableName tablename)
+  public HTableDescriptor remove(final TableName tablename)
   throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
@@ -399,7 +373,7 @@ public class FSTableDescriptors implements TableDescriptors {
         throw new IOException("Failed delete of " + tabledir.toString());
       }
     }
-    TableDescriptor descriptor = this.cache.remove(tablename);
+    HTableDescriptor descriptor = this.cache.remove(tablename);
     return descriptor;
   }
 
@@ -583,7 +557,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * if it exists, bypassing the local cache.
    * Returns null if it's not found.
    */
-  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
       Path hbaseRootDir, TableName tableName) throws IOException {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
@@ -594,7 +568,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * directly from the file system if it exists.
    * @throws TableInfoMissingException if there is no descriptor
    */
-  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
   throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir, false);
     if (status == null) {
@@ -603,7 +577,7 @@ public class FSTableDescriptors implements TableDescriptors {
     return readTableDescriptor(fs, status);
   }
 
-  private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
+  private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
       throws IOException {
     int len = Ints.checkedCast(status.getLen());
     byte [] content = new byte[len];
@@ -613,9 +587,9 @@ public class FSTableDescriptors implements TableDescriptors {
     } finally {
       fsDataInputStream.close();
     }
-    TableDescriptor htd = null;
+    HTableDescriptor htd = null;
     try {
-      htd = TableDescriptorBuilder.parseFrom(content);
+      htd = HTableDescriptor.parseFrom(content);
     } catch (DeserializationException e) {
       throw new IOException("content=" + Bytes.toShort(content), e);
     }
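
The reverted read path goes back to HTableDescriptor.parseFrom rather than TableDescriptorBuilder.parseFrom. A small round-trip sketch of that pb serialization (the table and family names are made up):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class DescriptorPbSketch {
  public static void main(String[] args) throws Exception {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    htd.addFamily(new HColumnDescriptor("cf"));
    byte[] pb = htd.toByteArray();                             // same form writeTD() persists
    HTableDescriptor parsed = HTableDescriptor.parseFrom(pb);  // same call readTableDescriptor() uses
    System.out.println("parsed: " + parsed.getTableName());
  }
}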
@@ -627,7 +601,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException Thrown if failed update.
    * @throws NotImplementedException if in read only mode
    */
-  @VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
+  @VisibleForTesting Path updateTableDescriptor(HTableDescriptor td)
   throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
@@ -689,7 +663,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @return Descriptor file or null if we failed write.
    */
   private static Path writeTableDescriptor(final FileSystem fs,
-    final TableDescriptor htd, final Path tableDir,
+    final HTableDescriptor htd, final Path tableDir,
     final FileStatus currentDescriptorFile)
   throws IOException {
     // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
@@ -744,42 +718,42 @@ public class FSTableDescriptors implements TableDescriptors {
     return tableInfoDirPath;
   }
 
-  private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
+  private static void writeTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
   throws IOException {
     FSDataOutputStream out = fs.create(p, false);
     try {
       // We used to write this file out as a serialized HTD Writable followed by two '\n's and then
       // the toString version of HTD.  Now we just write out the pb serialization.
-      out.write(TableDescriptorBuilder.toByteArray(htd));
+      out.write(htd.toByteArray());
     } finally {
       out.close();
     }
   }
 
   /**
-   * Create new TableDescriptor in HDFS. Happens when we are creating table.
+   * Create new HTableDescriptor in HDFS. Happens when we are creating a table.
    * Used by tests.
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
+  public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
     return createTableDescriptor(htd, false);
   }
 
   /**
-   * Create new TableDescriptor in HDFS. Happens when we are creating table. If
+   * Create new HTableDescriptor in HDFS. Happens when we are creating a table. If
    * forceCreation is true then even if previous table descriptor is present it
    * will be overwritten
    *
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
+  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
   throws IOException {
     Path tableDir = getTableDir(htd.getTableName());
     return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
 
   /**
-   * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
+   * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
    * a new table or snapshot a table.
    * @param tableDir table directory under which we should write the file
    * @param htd description of the table to write
@@ -790,7 +764,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException if a filesystem error occurs
    */
   public boolean createTableDescriptorForTableDirectory(Path tableDir,
-      TableDescriptor htd, boolean forceCreation) throws IOException {
+      HTableDescriptor htd, boolean forceCreation) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }

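Taken together, the reverted FSTableDescriptors methods still support the usual create-then-read round trip, only with HTableDescriptor in the signatures. A hedged sketch (the table name is illustrative, and the write lands under whatever hbase.rootdir the configuration names):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;

public class DescriptorRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("example");
    HTableDescriptor htd = new HTableDescriptor(name);
    htd.addFamily(new HColumnDescriptor("cf"));

    // Write the .tableinfo file, overwriting any existing one (forceCreation = true).
    new FSTableDescriptors(conf).createTableDescriptor(htd, true);

    // Read it straight back from the filesystem, bypassing the descriptor cache.
    FileSystem fs = FileSystem.get(conf);
    Path root = FSUtils.getRootDir(conf);
    HTableDescriptor read = FSTableDescriptors.getTableDescriptorFromFs(fs, root, name);
    System.out.println("read back: " + read.getTableName());
  }
}
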
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 199ed7d..ff5d482 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
@@ -88,9 +84,11 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -941,7 +939,7 @@ public class HBaseFsck extends Configured implements Closeable {
     TableName tableName = hi.getTableName();
     TableInfo tableInfo = tablesInfo.get(tableName);
     Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
-    TableDescriptor template = tableInfo.getHTD();
+    HTableDescriptor template = tableInfo.getHTD();
 
     // find min and max key values
     Pair<byte[],byte[]> orphanRegionRange = null;
@@ -1202,17 +1200,17 @@ public class HBaseFsck extends Configured implements Closeable {
    */
   private void reportTablesInFlux() {
     AtomicInteger numSkipped = new AtomicInteger(0);
-    TableDescriptor[] allTables = getTables(numSkipped);
+    HTableDescriptor[] allTables = getTables(numSkipped);
     errors.print("Number of Tables: " + allTables.length);
     if (details) {
       if (numSkipped.get() > 0) {
         errors.detail("Number of Tables in flux: " + numSkipped.get());
       }
-      for (TableDescriptor td : allTables) {
+      for (HTableDescriptor td : allTables) {
         errors.detail("  Table: " + td.getTableName() + "\t" +
                            (td.isReadOnly() ? "ro" : "rw") + "\t" +
                             (td.isMetaRegion() ? "META" : "    ") + "\t" +
-                           " families: " + td.getColumnFamilyCount());
+                           " families: " + td.getFamilies().size());
       }
     }
   }
@@ -1316,7 +1314,7 @@ public class HBaseFsck extends Configured implements Closeable {
         modTInfo = new TableInfo(tableName);
         tablesInfo.put(tableName, modTInfo);
         try {
-          TableDescriptor htd =
+          HTableDescriptor htd =
               FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
           modTInfo.htds.add(htd);
         } catch (IOException ioe) {
@@ -1363,17 +1361,17 @@ public class HBaseFsck extends Configured implements Closeable {
    * To fabricate a .tableinfo file with the following contents<br>
    * 1. the correct tablename <br>
    * 2. the correct colfamily list<br>
-   * 3. the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
+   * 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
    * @throws IOException
    */
   private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
       Set<String> columns) throws IOException {
     if (columns ==null || columns.isEmpty()) return false;
-    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     for (String columnfamimly : columns) {
-      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
+      htd.addFamily(new HColumnDescriptor(columnfamimly));
     }
-    fstd.createTableDescriptor(builder.build(), true);
+    fstd.createTableDescriptor(htd, true);
     return true;
   }
 
@@ -1398,7 +1396,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * 2. else create a default .tableinfo file with the following items<br>
    * &nbsp;2.1 the correct tablename <br>
    * &nbsp;2.2 the correct colfamily list<br>
-   * &nbsp;2.3 the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
+   * &nbsp;2.3 the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
    * @throws IOException
    */
   public void fixOrphanTables() throws IOException {
@@ -1406,7 +1404,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
       List<TableName> tmpList = new ArrayList<>(orphanTableDirs.keySet().size());
       tmpList.addAll(orphanTableDirs.keySet());
-      TableDescriptor[] htds = getTableDescriptors(tmpList);
+      HTableDescriptor[] htds = getHTableDescriptors(tmpList);
       Iterator<Entry<TableName, Set<String>>> iter =
           orphanTableDirs.entrySet().iterator();
       int j = 0;
@@ -1419,7 +1417,7 @@ public class HBaseFsck extends Configured implements Closeable {
         LOG.info("Trying to fix orphan table error: " + tableName);
         if (j < htds.length) {
           if (tableName.equals(htds[j].getTableName())) {
-            TableDescriptor htd = htds[j];
+            HTableDescriptor htd = htds[j];
             LOG.info("fixing orphan table: " + tableName + " from cache");
             fstd.createTableDescriptor(htd, true);
             j++;
@@ -1428,7 +1426,7 @@ public class HBaseFsck extends Configured implements Closeable {
         } else {
           if (fabricateTableInfo(fstd, tableName, entry.getValue())) {
             LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file");
-            LOG.warn("Strongly recommend to modify the TableDescriptor if necessary for: " + tableName);
+            LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName);
             iter.remove();
           } else {
             LOG.error("Unable to create default .tableinfo for " + tableName + " while missing column family information");
@@ -1465,7 +1463,7 @@ public class HBaseFsck extends Configured implements Closeable {
     Path rootdir = FSUtils.getRootDir(getConf());
     Configuration c = getConf();
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
     MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
     // The WAL subsystem will use the default rootDir rather than the passed in rootDir
     // unless I pass along via the conf.
@@ -2648,8 +2646,8 @@ public class HBaseFsck extends Configured implements Closeable {
    * regions reported for the table, but table dir is there in hdfs
    */
   private void loadTableInfosForTablesWithNoRegion() throws IOException {
-    Map<String, TableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
-    for (TableDescriptor htd : allTables.values()) {
+    Map<String, HTableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
+    for (HTableDescriptor htd : allTables.values()) {
       if (checkMetaOnly && !htd.isMetaTable()) {
         continue;
       }
@@ -2772,8 +2770,8 @@ public class HBaseFsck extends Configured implements Closeable {
     // region split calculator
     final RegionSplitCalculator<HbckInfo> sc = new RegionSplitCalculator<>(cmp);
 
-    // Histogram of different TableDescriptors found.  Ideally there is only one!
-    final Set<TableDescriptor> htds = new HashSet<>();
+    // Histogram of different HTableDescriptors found.  Ideally there is only one!
+    final Set<HTableDescriptor> htds = new HashSet<>();
 
     // key = start split, values = set of splits in problem group
     final Multimap<byte[], HbckInfo> overlapGroups =
@@ -2790,9 +2788,9 @@ public class HBaseFsck extends Configured implements Closeable {
     /**
      * @return descriptor common to all regions.  null if there are none or multiple!
      */
-    private TableDescriptor getHTD() {
+    private HTableDescriptor getHTD() {
       if (htds.size() == 1) {
-        return (TableDescriptor)htds.toArray()[0];
+        return (HTableDescriptor)htds.toArray()[0];
       } else {
         LOG.error("None/Multiple table descriptors found for table '"
           + tableName + "' regions: " + htds);
@@ -2962,7 +2960,7 @@ public class HBaseFsck extends Configured implements Closeable {
             "First region should start with an empty key.  Creating a new " +
             "region and regioninfo in HDFS to plug the hole.",
             getTableInfo(), next);
-        TableDescriptor htd = getTableInfo().getHTD();
+        HTableDescriptor htd = getTableInfo().getHTD();
         // from special EMPTY_START_ROW to next region's startKey
         HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
             HConstants.EMPTY_START_ROW, next.getStartKey());
@@ -2979,7 +2977,7 @@ public class HBaseFsck extends Configured implements Closeable {
         errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
             "Last region should end with an empty key.  Creating a new "
                 + "region and regioninfo in HDFS to plug the hole.", getTableInfo());
-        TableDescriptor htd = getTableInfo().getHTD();
+        HTableDescriptor htd = getTableInfo().getHTD();
         // from curEndKey to EMPTY_START_ROW
         HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
             HConstants.EMPTY_START_ROW);
@@ -3003,7 +3001,7 @@ public class HBaseFsck extends Configured implements Closeable {
                 + Bytes.toStringBinary(holeStopKey)
                 + ".  Creating a new regioninfo and region "
                 + "dir in hdfs to plug the hole.");
-        TableDescriptor htd = getTableInfo().getHTD();
+        HTableDescriptor htd = getTableInfo().getHTD();
         HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("Plugged hole by creating new empty region: "+ newRegion + " " +region);
@@ -3204,7 +3202,7 @@ public class HBaseFsck extends Configured implements Closeable {
         }
 
         // create new empty container region.
-        TableDescriptor htd = getTableInfo().getHTD();
+        HTableDescriptor htd = getTableInfo().getHTD();
         // from start key to end Key
         HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
             range.getSecond());
@@ -3505,7 +3503,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * @return tables that have not been modified recently
    * @throws IOException if an error is encountered
    */
-  TableDescriptor[] getTables(AtomicInteger numSkipped) {
+  HTableDescriptor[] getTables(AtomicInteger numSkipped) {
     List<TableName> tableNames = new ArrayList<>();
     long now = EnvironmentEdgeManager.currentTime();
 
@@ -3522,19 +3520,19 @@ public class HBaseFsck extends Configured implements Closeable {
         }
       }
     }
-    return getTableDescriptors(tableNames);
+    return getHTableDescriptors(tableNames);
   }
 
-  TableDescriptor[] getTableDescriptors(List<TableName> tableNames) {
-      LOG.info("getTableDescriptors == tableNames => " + tableNames);
+  HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
+    HTableDescriptor[] htd = new HTableDescriptor[0];
+      LOG.info("getHTableDescriptors == tableNames => " + tableNames);
     try (Connection conn = ConnectionFactory.createConnection(getConf());
         Admin admin = conn.getAdmin()) {
-      List<TableDescriptor> tds = admin.listTableDescriptors(tableNames);
-      return tds.toArray(new TableDescriptor[tds.size()]);
+      htd = admin.getTableDescriptorsByTableName(tableNames);
     } catch (IOException e) {
       LOG.debug("Exception getting table descriptors", e);
     }
-    return new TableDescriptor[0];
+    return htd;
   }
 
   /**

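The hbck change swaps Admin.listTableDescriptors back for the array-returning getTableDescriptorsByTableName. A rough sketch of that reverted call path (the table names here are placeholders):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminDescriptorsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    List<TableName> names = Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2"));
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Reverted code path: fetch descriptors for the named tables as an HTableDescriptor array.
      HTableDescriptor[] htds = admin.getTableDescriptorsByTableName(names);
      for (HTableDescriptor htd : htds) {
        System.out.println(htd.getTableName() + " families: " + htd.getFamilies().size());
      }
    }
  }
}
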
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 6552b32..961e8a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -188,7 +188,7 @@ public class HBaseFsckRepair {
    * Creates, flushes, and closes a new region.
    */
   public static HRegion createHDFSRegionDir(Configuration conf,
-      HRegionInfo hri, TableDescriptor htd) throws IOException {
+      HRegionInfo hri, HTableDescriptor htd) throws IOException {
     // Create HRegion
     Path root = FSUtils.getRootDir(conf);
     HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);

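HBaseFsckRepair.createHDFSRegionDir once again takes an HTableDescriptor. A hedged sketch of calling it to lay down an on-disk region spanning the whole key range (names are illustrative, and the directory lands under the configured hbase.rootdir):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.HBaseFsckRepair;

public class RegionDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    htd.addFamily(new HColumnDescriptor("cf"));
    // One region covering the whole key space.
    HRegionInfo hri = new HRegionInfo(htd.getTableName(),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    // Creates, flushes and closes the region; the returned handle is already closed.
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, hri, htd);
    System.out.println("created " + region.getRegionInfo().getRegionNameAsString());
  }
}
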
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index cc478a5..8ea7012 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -39,8 +39,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 
 /**
  * Utility methods for interacting with the regions.
@@ -60,13 +61,13 @@ public abstract class ModifyRegionUtils {
     void editRegion(final HRegionInfo region) throws IOException;
   }
 
-  public static HRegionInfo[] createHRegionInfos(TableDescriptor tableDescriptor,
+  public static HRegionInfo[] createHRegionInfos(HTableDescriptor hTableDescriptor,
       byte[][] splitKeys) {
     long regionId = System.currentTimeMillis();
     HRegionInfo[] hRegionInfos = null;
     if (splitKeys == null || splitKeys.length == 0) {
       hRegionInfos = new HRegionInfo[]{
-        new HRegionInfo(tableDescriptor.getTableName(), null, null, false, regionId)
+        new HRegionInfo(hTableDescriptor.getTableName(), null, null, false, regionId)
       };
     } else {
       int numRegions = splitKeys.length + 1;
@@ -76,7 +77,7 @@ public abstract class ModifyRegionUtils {
       for (int i = 0; i < numRegions; i++) {
         endKey = (i == splitKeys.length) ? null : splitKeys[i];
         hRegionInfos[i] =
-             new HRegionInfo(tableDescriptor.getTableName(), startKey, endKey,
+             new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
                  false, regionId);
         startKey = endKey;
       }
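
For n split keys, createHRegionInfos lays out n+1 regions whose boundaries chain through the split points: the first region starts at the empty key, each subsequent region starts where the previous one ended, and the last region ends at the empty key. A small sketch against the reverted signature (split keys and names are made up):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

public class RegionLayoutSketch {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    htd.addFamily(new HColumnDescriptor("cf"));
    // Two split keys yield three regions: (-inf, "b"), ["b", "m"), ["m", +inf).
    byte[][] splits = { Bytes.toBytes("b"), Bytes.toBytes("m") };
    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splits);
    for (HRegionInfo hri : regions) {
      System.out.println(Bytes.toStringBinary(hri.getStartKey()) + " -> "
          + Bytes.toStringBinary(hri.getEndKey()));
    }
  }
}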
@@ -90,20 +91,20 @@ public abstract class ModifyRegionUtils {
    *
    * @param conf {@link Configuration}
    * @param rootDir Root directory for HBase instance
-   * @param tableDescriptor description of the table
+   * @param hTableDescriptor description of the table
    * @param newRegions {@link HRegionInfo} that describes the regions to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
   public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
-      final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
+      final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
       final RegionFillTask task) throws IOException {
     if (newRegions == null) return null;
     int regionNumber = newRegions.length;
     ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf,
-        "RegionOpenAndInitThread-" + tableDescriptor.getTableName(), regionNumber);
+        "RegionOpenAndInitThread-" + hTableDescriptor.getTableName(), regionNumber);
     try {
-      return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task);
+      return createRegions(exec, conf, rootDir, hTableDescriptor, newRegions, task);
     } finally {
       exec.shutdownNow();
     }
@@ -116,15 +117,15 @@ public abstract class ModifyRegionUtils {
    * @param exec Thread Pool Executor
    * @param conf {@link Configuration}
    * @param rootDir Root directory for HBase instance
-   * @param tableDescriptor description of the table
+   * @param hTableDescriptor description of the table
    * @param newRegions {@link HRegionInfo} that describes the regions to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
   public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec,
-                                                final Configuration conf, final Path rootDir,
-                                                final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
-                                                final RegionFillTask task) throws IOException {
+      final Configuration conf, final Path rootDir,
+      final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
+      final RegionFillTask task) throws IOException {
     if (newRegions == null) return null;
     int regionNumber = newRegions.length;
     CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<>(exec);
@@ -133,7 +134,7 @@ public abstract class ModifyRegionUtils {
       completionService.submit(new Callable<HRegionInfo>() {
         @Override
         public HRegionInfo call() throws IOException {
-          return createRegion(conf, rootDir, tableDescriptor, newRegion, task);
+          return createRegion(conf, rootDir, hTableDescriptor, newRegion, task);
         }
       });
     }
@@ -155,20 +156,20 @@ public abstract class ModifyRegionUtils {
    * Create new set of regions on the specified file-system.
    * @param conf {@link Configuration}
    * @param rootDir Root directory for HBase instance
-   * @param tableDescriptor description of the table
+   * @param hTableDescriptor description of the table
    * @param newRegion {@link HRegionInfo} that describes the region to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
   public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
-      final TableDescriptor tableDescriptor, final HRegionInfo newRegion,
+      final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
       final RegionFillTask task) throws IOException {
     // 1. Create HRegion
     // The WAL subsystem will use the default rootDir rather than the passed in rootDir
     // unless I pass along via the conf.
     Configuration confForWAL = new Configuration(conf);
     confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
-    HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false);
+    HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, hTableDescriptor, null, false);
     try {
       // 2. Custom user code to interact with the created region
       if (task != null) {