Posted to commits@hbase.apache.org by st...@apache.org on 2019/10/04 03:49:38 UTC

[hbase] branch HBASE-23055 updated (c7ed9f4 -> b5a1307)

This is an automated email from the ASF dual-hosted git repository.

stack pushed a change to branch HBASE-23055
in repository https://gitbox.apache.org/repos/asf/hbase.git.


 discard c7ed9f4  HBASE-23055 Alter hbase:meta (#667)
     add 7ee6d59  HBASE-23092 Make the RM tooling in dev-tools/create-release generic (#671)
     add 5aa8d3a  HBASE-22874 Define a public API for Canary checking and a non-public tool implementation
     new 6e00219  HBASE-23055 Alter hbase:meta (#667)
     new b5a1307  HBASE-23103 Survey incidence of table state queries Log at trace level when getTableState called

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (c7ed9f4)
            \
             N -- N -- N   refs/heads/HBASE-23055 (b5a1307)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 bin/hbase                                          |    2 +-
 dev-support/create-release/README.txt              |    8 +
 dev-support/create-release/do-release-docker.sh    |   34 +-
 dev-support/create-release/do-release.sh           |   11 +-
 dev-support/create-release/release-build.sh        |  241 ++-
 dev-support/create-release/release-tag.sh          |   34 +-
 dev-support/create-release/release-util.sh         |  155 +-
 dev-support/create-release/vote.tmpl               |   10 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon       |    4 +-
 .../hadoop/hbase/master/MasterRpcServices.java     |    5 +
 .../java/org/apache/hadoop/hbase/tool/Canary.java  | 1764 +-------------------
 .../hbase/tool/{Canary.java => CanaryTool.java}    |  259 ++-
 .../apache/hadoop/hbase/tool/TestCanaryTool.java   |   39 +-
 13 files changed, 509 insertions(+), 2057 deletions(-)
 copy hbase-server/src/main/java/org/apache/hadoop/hbase/tool/{Canary.java => CanaryTool.java} (89%)
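
HBASE-22874 above splits the old monolithic Canary into a public Canary API plus a
non-public CanaryTool implementation. A minimal sketch of driving the new tool
programmatically, assuming CanaryTool keeps the Hadoop Tool entry point the old
Canary class had; "someTable" is a placeholder target:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.tool.CanaryTool;
  import org.apache.hadoop.util.ToolRunner;

  public final class CanarySmokeCheck {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // A zero exit code means every probed region/column family responded.
      int rc = ToolRunner.run(conf, new CanaryTool(), new String[] { "someTable" });
      System.exit(rc);
    }
  }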


[hbase] 02/02: HBASE-23103 Survey incidence of table state queries Log at trace level when getTableState called

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch HBASE-23055
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit b5a13077e35a167ff3d0669a50a0537d741cb02f
Author: stack <st...@apache.org>
AuthorDate: Thu Oct 3 20:47:56 2019 -0700

    HBASE-23103 Survey incidence of table state queries
    Log at trace level when getTableState called
---
 .../main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java  | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 06a99fa..bb5ebc1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1131,6 +1131,11 @@ public class MasterRpcServices extends RSRpcServices
     try {
       master.checkServiceStarted();
       TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+      if (LOG.isTraceEnabled()) {
+        User caller = RpcServer.getRequestUser().orElse(null);
+        String client = RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("");
+        LOG.trace("Get Table State {}, {}, {}", tableName.getNameAsString(), caller, client);
+      }
       TableState ts = master.getTableStateManager().getTableState(tableName);
       GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
       builder.setTableState(ts.convert());
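
For the survey to produce output, the MasterRpcServices logger must be at TRACE.
A minimal sketch of flipping it from inside the Master JVM (test setup, say),
assuming the log4j 1.x API that branch-2-era HBase ships; the same effect is
available without code via a log4j.properties entry or the master UI's /logLevel
servlet:

  import org.apache.log4j.Level;
  import org.apache.log4j.LogManager;

  public final class EnableGetTableStateTrace {
    public static void main(String[] args) {
      // Once at TRACE, each getTableState call logs table, caller and client
      // address, which is the incidence data HBASE-23103 wants to collect.
      LogManager.getLogger("org.apache.hadoop.hbase.master.MasterRpcServices")
          .setLevel(Level.TRACE);
    }
  }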


[hbase] 01/02: HBASE-23055 Alter hbase:meta (#667)

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch HBASE-23055
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 6e00219b36117e1238af6159951227db5cf76498
Author: Michael Stack <sa...@users.noreply.github.com>
AuthorDate: Mon Sep 30 09:29:51 2019 -0700

    HBASE-23055 Alter hbase:meta (#667)

    Make it so hbase:meta can be altered. TableState for hbase:meta
    is kept in the Master. State is in-memory and transient, so if the Master
    fails, hbase:meta is ENABLED again. The hbase:meta schema will be
    bootstrapped from the filesystem. Changes to the filesystem schema
    are atomic, so we should be OK if the Master fails mid-edit (TBD).
    Undoes a bunch of guards that prevented our being able to edit
    hbase:meta. At minimum, need to add in a bunch of WARNINGs.

    TODO: Tests, more clarity around hbase:meta table state, and undoing
    references to hard-coded hbase:meta regioninfo.

    M hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
     Throw an illegal access exception if you try to use MetaTableAccessor
     to get the state of the hbase:meta table.

    M hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
     For table state, go to the Master rather than to meta directly. Going
     to meta won't work for hbase:meta state. Puts load on the Master.

    M hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
     Change the isTableDisabled/Enabled implementations to ask the Master instead.
     This gives the Master's TableStateManager's opinion rather than the
     client figuring it out for itself by reading the meta table directly.

    M hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
     TODO: Cleanup in here. Go to the Master for state, not to meta.

    M hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java
     Logging cleanup.

    M hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
     Shut down access.

    M hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
     Just cleanup.

    M hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
     Add a state holder for hbase:meta.
     Removed unused methods.

    M hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
     Shut down access.

    M hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
     Allow hbase:meta to be disabled.

    M hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
     Allow hbase:meta to be enabled.

    Signed-off-by: Ramkrishna <ra...@intel.com>
---
 .../hadoop/hbase/AsyncMetaTableAccessor.java       |   3 +-
 .../org/apache/hadoop/hbase/MetaTableAccessor.java |  20 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  49 +++----
 .../hadoop/hbase/client/ZKAsyncRegistry.java       |  12 +-
 .../apache/hadoop/hbase/zookeeper/ZNodePaths.java  |  59 ++++----
 .../java/org/apache/hadoop/hbase/HConstants.java   |   3 +-
 .../org/apache/hadoop/hbase/TableDescriptors.java  |  20 +--
 .../org/apache/hadoop/hbase/master/HMaster.java    |   7 +-
 .../hadoop/hbase/master/TableStateManager.java     | 115 ++++-----------
 .../hbase/master/assignment/RegionStateStore.java  |   3 +-
 .../master/procedure/CreateTableProcedure.java     |   4 +-
 .../master/procedure/DisableTableProcedure.java    |  10 +-
 .../master/procedure/EnableTableProcedure.java     | 149 ++++++++++---------
 .../master/replication/AbstractPeerProcedure.java  |   6 +-
 .../master/replication/ModifyPeerProcedure.java    |   6 +-
 .../hbase/master/zksyncer/MetaLocationSyncer.java  |   6 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |   7 +-
 .../RejectReplicationRequestStateChecker.java      |   5 +-
 .../hadoop/hbase/util/FSTableDescriptors.java      | 163 ++++++++-------------
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |  12 --
 .../org/apache/hadoop/hbase/TestHBaseMetaEdit.java | 109 ++++++++++++++
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |  17 ---
 .../client/TestAsyncAccessControlAdminApi.java     |   2 +-
 .../hbase/client/TestAsyncTableAdminApi2.java      |  13 --
 .../hbase/client/TestAsyncTableAdminApi3.java      |  12 --
 .../hadoop/hbase/client/TestMetaWithReplicas.java  |   7 +
 .../hadoop/hbase/regionserver/TestClusterId.java   |   2 +-
 .../hbase/regionserver/TestDefaultMemStore.java    |   2 +-
 .../regionserver/TestGetClosestAtOrBefore.java     |  59 +++++++-
 .../regionserver/wal/TestLogRollingNoCluster.java  |   8 +-
 .../hadoop/hbase/util/TestFSTableDescriptors.java  |   7 +-
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   |  18 +--
 .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java  |   2 +-
 33 files changed, 461 insertions(+), 456 deletions(-)
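
Before the per-file diffs, the end-to-end flow this change is after, as a
client-side sketch. Hypothetical standalone class; setMaxVersions(5) is an
arbitrary example edit, and altering the catalog family on a live cluster is
at your own risk:

  import java.io.IOException;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public final class AlterMetaSketch {
    public static void main(String[] args) throws IOException {
      try (Connection conn = ConnectionFactory.createConnection();
           Admin admin = conn.getAdmin()) {
        // Previously failed with "Cannot disable catalog table"; now allowed.
        admin.disableTable(TableName.META_TABLE_NAME);
        // Edit hbase:meta like any other table, e.g. raise catalog-family versions.
        admin.modifyColumnFamily(TableName.META_TABLE_NAME,
            ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
                .setMaxVersions(5).build());
        // Bring it back. If the Master dies mid-edit, state reverts to ENABLED.
        admin.enableTable(TableName.META_TABLE_NAME);
      }
    }
  }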

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
index d04ea52..bc769ba 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -616,4 +616,5 @@ public class AsyncMetaTableAccessor {
     }
     return -1;
   }
+
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index ad54324..3ad2a06 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -303,11 +303,18 @@ public class MetaTableAccessor {
    */
   public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo)
       throws IOException {
-    byte[] row = getMetaKeyForRegion(regionInfo);
-    Get get = new Get(row);
+    return getRegionLocation(getCatalogFamilyRow(connection, regionInfo),
+        regionInfo, regionInfo.getReplicaId());
+  }
+
+  /**
+   * @return The {@link HConstants#CATALOG_FAMILY} row from hbase:meta for the passed region.
+   */
+  public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri)
+      throws IOException {
+    Get get = new Get(getMetaKeyForRegion(ri));
     get.addFamily(HConstants.CATALOG_FAMILY);
-    Result r = get(getMetaHTable(connection), get);
-    return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
+    return get(getMetaHTable(connection), get);
   }
 
   /** Returns the row key to use for this regionInfo */
@@ -1077,7 +1084,7 @@ public class MetaTableAccessor {
   public static TableState getTableState(Connection conn, TableName tableName)
       throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
-      return new TableState(tableName, TableState.State.ENABLED);
+      throw new IllegalAccessError("Go to the Master to find hbase:meta table state, not here");
     }
     Table metaHTable = getMetaHTable(conn);
     Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
@@ -1105,7 +1112,8 @@ public class MetaTableAccessor {
   }
 
   /**
-   * Updates state in META
+   * Updates state in META.
+   * Do not use. For internal use only.
    * @param conn connection to use
    * @param tableName table to look for
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 92cc0dc..6e8439b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -192,6 +192,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
@@ -666,42 +668,25 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
-    if (TableName.isMetaTableName(tableName)) {
-      return CompletableFuture.completedFuture(true);
-    }
-    CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
-      if (error != null) {
-        future.completeExceptionally(error);
-        return;
-      }
-      if (state.isPresent()) {
-        future.complete(state.get().inStates(TableState.State.ENABLED));
-      } else {
-        future.completeExceptionally(new TableNotFoundException(tableName));
-      }
-    });
-    return future;
+    return isTableState(tableName, TableState.State.ENABLED);
   }
 
   @Override
   public CompletableFuture<Boolean> isTableDisabled(TableName tableName) {
-    if (TableName.isMetaTableName(tableName)) {
-      return CompletableFuture.completedFuture(false);
-    }
-    CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
-      if (error != null) {
-        future.completeExceptionally(error);
-        return;
-      }
-      if (state.isPresent()) {
-        future.complete(state.get().inStates(TableState.State.DISABLED));
-      } else {
-        future.completeExceptionally(new TableNotFoundException(tableName));
-      }
-    });
-    return future;
+    return isTableState(tableName, TableState.State.DISABLED);
+  }
+
+  /**
+   * @return Future that calls Master getTableState and compares to <code>state</code>
+   */
+  private CompletableFuture<Boolean> isTableState(TableName tableName, TableState.State state) {
+    return this.<Boolean> newMasterCaller().
+        action((controller, stub) ->
+      this.<GetTableStateRequest, GetTableStateResponse, Boolean> call(controller, stub,
+          GetTableStateRequest.newBuilder().
+              setTableName(ProtobufUtil.toProtoTableName(tableName)).build(),
+        (s, c, req, done) -> s.getTableState(c, req, done),
+        resp -> resp.getTableState().getState().toString().equals(state.toString()))).call();
   }
 
   @Override
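
The client-visible effect of the isTableEnabled/isTableDisabled refactor above,
as a hedged sketch: both checks now answer from the Master's TableStateManager,
so they also work for hbase:meta, which the old code short-circuited to
true/false without asking anyone:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.AsyncConnection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public final class IsMetaEnabledSketch {
    public static void main(String[] args) throws Exception {
      try (AsyncConnection conn =
          ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
        // Resolved via the Master's GetTableState RPC, not by reading meta directly.
        boolean enabled = conn.getAdmin().isTableEnabled(TableName.META_TABLE_NAME).get();
        System.out.println("hbase:meta enabled? " + enabled);
      }
    }
  }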
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java
index 36fa6bb..0a021aa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -158,7 +158,8 @@ class ZKAsyncRegistry implements AsyncRegistry {
           }
           Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
           if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
-            LOG.warn("Meta region is in state " + stateAndServerName.getFirst());
+            LOG.warn("hbase:meta region (replicaId={}) is in state {}", replicaId,
+                stateAndServerName.getFirst());
           }
           locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
             getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
@@ -173,7 +174,7 @@ class ZKAsyncRegistry implements AsyncRegistry {
             LOG.warn("Failed to fetch " + path, error);
             locs[replicaId] = null;
           } else if (proto == null) {
-            LOG.warn("Meta znode for replica " + replicaId + " is null");
+            LOG.warn("hbase:meta znode for replica " + replicaId + " is null");
             locs[replicaId] = null;
           } else {
             Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
@@ -197,9 +198,8 @@ class ZKAsyncRegistry implements AsyncRegistry {
   public CompletableFuture<RegionLocations> getMetaRegionLocation() {
     CompletableFuture<RegionLocations> future = new CompletableFuture<>();
     addListener(
-      zk.list(znodePaths.baseZNode)
-        .thenApply(children -> children.stream()
-          .filter(c -> c.startsWith(znodePaths.metaZNodePrefix)).collect(Collectors.toList())),
+      zk.list(znodePaths.baseZNode).thenApply(children -> children.stream().
+          filter(c -> znodePaths.isMetaZNodePrefix(c)).collect(Collectors.toList())),
       (metaReplicaZNodes, error) -> {
         if (error != null) {
           future.completeExceptionally(error);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
index c5e510f..cfa0e82 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -24,6 +24,7 @@ import static org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
 
+import java.util.Collection;
 import java.util.Optional;
 import java.util.stream.IntStream;
 import org.apache.hadoop.conf.Configuration;
@@ -40,15 +41,24 @@ public class ZNodePaths {
   // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved.
   public static final char ZNODE_PATH_SEPARATOR = '/';
 
-  public final static String META_ZNODE_PREFIX = "meta-region-server";
+  private static final String META_ZNODE_PREFIX = "meta-region-server";
   private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup";
 
   // base znode for this cluster
   public final String baseZNode;
-  // the prefix of meta znode, does not include baseZNode.
-  public final String metaZNodePrefix;
-  // znodes containing the locations of the servers hosting the meta replicas
-  public final ImmutableMap<Integer, String> metaReplicaZNodes;
+
+  /**
+   * The prefix of the meta znode. Does not include baseZNode.
+   * It's a 'prefix' because the meta replica id integer can be tagged on the
+   * end (if no number is present, it is the 'default' replica).
+   */
+  private final String metaZNodePrefix;
+
+  /**
+   * znodes containing the locations of the servers hosting the meta replicas
+   */
+  private final ImmutableMap<Integer, String> metaReplicaZNodes;
+
   // znode containing ephemeral nodes of the regionservers
   public final String rsZNode;
   // znode containing ephemeral nodes of the draining regionservers
@@ -158,21 +168,21 @@ public class ZNodePaths {
   }
 
   /**
-   * Is the znode of any meta replica
-   * @param node
-   * @return true or false
+   * @return true if the znode is a meta region replica
    */
   public boolean isAnyMetaReplicaZNode(String node) {
-    if (metaReplicaZNodes.containsValue(node)) {
-      return true;
-    }
-    return false;
+    return this.metaReplicaZNodes.containsValue(node);
+  }
+
+  /**
+   * @return Meta Replica ZNodes
+   */
+  public Collection<String> getMetaReplicaZNodes() {
+    return this.metaReplicaZNodes.values();
   }
 
   /**
-   * Get the znode string corresponding to a replicaId
-   * @param replicaId
-   * @return znode
+   * @return the znode string corresponding to a replicaId
    */
   public String getZNodeForReplica(int replicaId) {
     // return a newly created path but don't update the cache of paths
@@ -183,24 +193,21 @@ public class ZNodePaths {
   }
 
   /**
-   * Parse the meta replicaId from the passed znode
+   * Parse the meta replicaId from the passed znode name.
    * @param znode the name of the znode, does not include baseZNode
    * @return replicaId
    */
   public int getMetaReplicaIdFromZnode(String znode) {
-    if (znode.equals(metaZNodePrefix)) {
-      return RegionInfo.DEFAULT_REPLICA_ID;
-    }
-    return Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
+    return znode.equals(metaZNodePrefix)?
+        RegionInfo.DEFAULT_REPLICA_ID:
+        Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
   }
 
   /**
-   * Is it the default meta replica's znode
-   * @param znode the name of the znode, does not include baseZNode
-   * @return true or false
+   * @return True if the znode name starts with the meta znode prefix.
    */
-  public boolean isDefaultMetaReplicaZnode(String znode) {
-    return metaReplicaZNodes.get(DEFAULT_REPLICA_ID).equals(znode);
+  public boolean isMetaZNodePrefix(String znode) {
+    return znode != null && znode.startsWith(this.metaZNodePrefix);
   }
 
   /**
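
The naming scheme the now-private prefix encodes, distilled into a hypothetical
stand-alone sketch (PREFIX mirrors META_ZNODE_PREFIX; the parse logic follows
getMetaReplicaIdFromZnode above):

  public final class MetaZNodeNamingSketch {
    private static final String PREFIX = "meta-region-server";

    // "meta-region-server"   -> default replica (id 0)
    // "meta-region-server-2" -> replica 2
    static int replicaId(String znode) {
      return znode.equals(PREFIX)
          ? 0
          : Integer.parseInt(znode.substring(PREFIX.length() + 1));
    }

    public static void main(String[] args) {
      System.out.println(replicaId("meta-region-server"));   // 0
      System.out.println(replicaId("meta-region-server-2")); // 2
    }
  }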
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index a999e73..d810ba4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1175,8 +1175,7 @@ public final class HConstants {
   /** Directories that are not HBase user table directories */
   public static final List<String> HBASE_NON_USER_TABLE_DIRS =
     Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
-      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
-      HBASE_NON_TABLE_DIRS.toArray())));
+       HBASE_NON_TABLE_DIRS.toArray())));
 
   /** Health script related settings. */
   public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 2537e7f..e0a9eab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -25,25 +25,19 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 
 /**
  * Get, remove and modify table descriptors.
- * Used by servers to host descriptors.
  */
 @InterfaceAudience.Private
 public interface TableDescriptors {
   /**
-   * @param tableName
    * @return TableDescriptor for tablename
-   * @throws IOException
    */
-  TableDescriptor get(final TableName tableName)
-      throws IOException;
+  TableDescriptor get(final TableName tableName) throws IOException;
 
   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
-   * @throws IOException
    */
-  Map<String, TableDescriptor> getByNamespace(String name)
-  throws IOException;
+  Map<String, TableDescriptor> getByNamespace(String name) throws IOException;
 
   /**
    * Get Map of all TableDescriptors. Populates the descriptor cache as a
@@ -51,25 +45,19 @@ public interface TableDescriptors {
    * Notice: the key of map is the table name which contains namespace. It was generated by
    * {@link TableName#getNameWithNamespaceInclAsString()}.
    * @return Map of all descriptors.
-   * @throws IOException
    */
   Map<String, TableDescriptor> getAll() throws IOException;
 
   /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
-   * @throws IOException
    */
-  void add(final TableDescriptor htd)
-  throws IOException;
+  void add(final TableDescriptor htd) throws IOException;
 
   /**
-   * @param tablename
    * @return Instance of table descriptor or null if none found.
-   * @throws IOException
    */
-  TableDescriptor remove(final TableName tablename)
-  throws IOException;
+  TableDescriptor remove(final TableName tablename) throws IOException;
 
   /**
    * Enables the tabledescriptor cache
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 98841f9..f5a9ae3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -49,7 +49,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import javax.servlet.ServletException;
@@ -670,10 +669,6 @@ public class HMaster extends HRegionServer implements MasterServices {
     return connector.getLocalPort();
   }
 
-  @Override
-  protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
-    return builder -> builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
-  }
   /**
    * For compatibility, if failed with regionserver credentials, try the master one
    */
@@ -1035,7 +1030,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     RegionState rs = this.assignmentManager.getRegionStates().
         getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO);
     LOG.info("hbase:meta {}", rs);
-    if (rs.isOffline()) {
+    if (rs != null && rs.isOffline()) {
       Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
         .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
       initMetaProc = optProc.orElseGet(() -> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 1eb0416..5901ae2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
 import org.apache.hadoop.hbase.util.IdReadWriteLock;
 import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -53,8 +52,20 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 // TODO: Make this a guava Service
 @InterfaceAudience.Private
 public class TableStateManager {
-
   private static final Logger LOG = LoggerFactory.getLogger(TableStateManager.class);
+
+  /**
+   * All table state is kept in hbase:meta except that of hbase:meta itself.
+   * hbase:meta state is kept here, in this in-memory variable, and is not
+   * persisted; if this process dies, even in the midst of an edit, the
+   * hbase:meta state reverts to ENABLED. The state exists so we can edit
+   * hbase:meta as we would any other table: by disabling, altering, and then
+   * re-enabling it. Schema is read from the filesystem, and changes to it are
+   * atomic, so we should be fine if the Master fails mid-edit.
+   */
+  private TableState.State metaTableState = TableState.State.ENABLED;
+
   /**
   * Set this key to false in Configuration to disable migrating table state from zookeeper to the
    * hbase:meta table.
@@ -68,7 +79,7 @@ public class TableStateManager {
   private final ConcurrentMap<TableName, TableState.State> tableName2State =
     new ConcurrentHashMap<>();
 
-  public TableStateManager(MasterServices master) {
+  TableStateManager(MasterServices master) {
     this.master = master;
   }
 
@@ -87,61 +98,6 @@ public class TableStateManager {
     }
   }
 
-  /**
-   * Set table state to provided but only if table in specified states Caller should lock table on
-   * write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @param states states to check against
-   * @return null if succeed or table state if failed
-   */
-  public TableState setTableStateIfInStates(TableName tableName, TableState.State newState,
-      TableState.State... states) throws IOException {
-    ReadWriteLock lock = tnLock.getLock(tableName);
-    lock.writeLock().lock();
-    try {
-      TableState currentState = readMetaState(tableName);
-      if (currentState == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (currentState.inStates(states)) {
-        updateMetaState(tableName, newState);
-        return null;
-      } else {
-        return currentState;
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Set table state to provided but only if table not in specified states Caller should lock table
-   * on write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @param states states to check against
-   */
-  public boolean setTableStateIfNotInStates(TableName tableName, TableState.State newState,
-      TableState.State... states) throws IOException {
-    ReadWriteLock lock = tnLock.getLock(tableName);
-    lock.writeLock().lock();
-    try {
-      TableState currentState = readMetaState(tableName);
-      if (currentState == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (!currentState.inStates(states)) {
-        updateMetaState(tableName, newState);
-        return true;
-      } else {
-        return false;
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
   public boolean isTableState(TableName tableName, TableState.State... states) {
     try {
       TableState tableState = getTableState(tableName);
@@ -155,6 +111,7 @@ public class TableStateManager {
 
   public void setDeletedTable(TableName tableName) throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
+      // Can't delete the hbase:meta table.
       return;
     }
     ReadWriteLock lock = tnLock.getLock(tableName);
@@ -183,7 +140,7 @@ public class TableStateManager {
    * @param states filter by states
    * @return tables in given states
    */
-  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
+  Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
     // Only be called in region normalizer, will not use cache.
     final Set<TableName> rv = Sets.newHashSet();
     MetaTableAccessor.fullScanTables(master.getConnection(), new MetaTableAccessor.Visitor() {
@@ -199,12 +156,6 @@ public class TableStateManager {
     return rv;
   }
 
-  public static class TableStateNotFoundException extends TableNotFoundException {
-    TableStateNotFoundException(TableName tableName) {
-      super(tableName.getNameAsString());
-    }
-  }
-
   @NonNull
   public TableState getTableState(TableName tableName) throws IOException {
     ReadWriteLock lock = tnLock.getLock(tableName);
@@ -212,7 +163,7 @@ public class TableStateManager {
     try {
       TableState currentState = readMetaState(tableName);
       if (currentState == null) {
-        throw new TableStateNotFoundException(tableName);
+        throw new TableNotFoundException("No state found for " + tableName);
       }
       return currentState;
     } finally {
@@ -221,22 +172,18 @@ public class TableStateManager {
   }
 
   private void updateMetaState(TableName tableName, TableState.State newState) throws IOException {
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      if (TableState.State.DISABLING.equals(newState) ||
-        TableState.State.DISABLED.equals(newState)) {
-        throw new IllegalArgumentIOException("Cannot disable the meta table; " + newState);
-      }
-      // Otherwise, just return; no need to set ENABLED on meta -- it is always ENABLED.
-      return;
-    }
     boolean succ = false;
     try {
-      MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState);
-      tableName2State.put(tableName, newState);
+      if (tableName.equals(TableName.META_TABLE_NAME)) {
+        this.metaTableState = newState;
+      } else {
+        MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState);
+      }
+      this.tableName2State.put(tableName, newState);
       succ = true;
     } finally {
       if (!succ) {
-        tableName2State.remove(tableName);
+        this.tableName2State.remove(tableName);
       }
     }
     metaStateUpdated(tableName, newState);
@@ -255,7 +202,9 @@ public class TableStateManager {
     if (state != null) {
       return new TableState(tableName, state);
     }
-    TableState tableState = MetaTableAccessor.getTableState(master.getConnection(), tableName);
+    TableState tableState = tableName.equals(TableName.META_TABLE_NAME)?
+        new TableState(TableName.META_TABLE_NAME, this.metaTableState):
+        MetaTableAccessor.getTableState(master.getConnection(), tableName);
     if (tableState != null) {
       tableName2State.putIfAbsent(tableName, tableState.getState());
     }
@@ -263,10 +212,8 @@ public class TableStateManager {
   }
 
   public void start() throws IOException {
-    TableDescriptors tableDescriptors = master.getTableDescriptors();
     migrateZooKeeper();
-    Connection connection = master.getConnection();
-    fixTableStates(tableDescriptors, connection);
+    fixTableStates(master.getTableDescriptors(), master.getConnection());
   }
 
   private void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
@@ -335,7 +282,7 @@ public class TableStateManager {
         TableState ts = null;
         try {
           ts = getTableState(entry.getKey());
-        } catch (TableStateNotFoundException e) {
+        } catch (TableNotFoundException e) {
           // This can happen; table exists but no TableState.
         }
         if (ts == null) {
@@ -377,4 +324,4 @@ public class TableStateManager {
       LOG.warn("Failed deleting table state from zookeeper", e);
     }
   }
-}
+}
\ No newline at end of file
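
A distilled restatement, with hypothetical names, of the pattern the
TableStateManager hunks above add: hbase:meta's state lives in a volatile
field, while every other table round-trips through the hbase:meta table:

  final class MetaStateHolderSketch {
    private volatile String metaState = "ENABLED"; // reverts here on process death

    String readState(String table) {
      return "hbase:meta".equals(table) ? metaState : readFromMetaTable(table);
    }

    void updateState(String table, String newState) {
      if ("hbase:meta".equals(table)) {
        metaState = newState; // in-memory only, never persisted
      } else {
        writeToMetaTable(table, newState);
      }
    }

    // Stand-ins for MetaTableAccessor.getTableState/updateTableState.
    private String readFromMetaTable(String table) { return "ENABLED"; }
    private void writeToMetaTable(String table, String state) { }
  }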
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 69bc8f7..e4e4803 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -147,8 +147,7 @@ public class RegionStateStore {
     }
   }
 
-  public void updateRegionLocation(RegionStateNode regionStateNode)
-      throws IOException {
+  void updateRegionLocation(RegionStateNode regionStateNode) throws IOException {
     if (regionStateNode.getRegionInfo().isMetaRegion()) {
       updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(),
         regionStateNode.getState());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 34fde27..7b7df98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -78,9 +78,7 @@ public class CreateTableProcedure
   @Override
   protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state)
       throws InterruptedException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace(this + " execute state=" + state);
-    }
+    LOG.info("{} execute state={}", this, state);
     try {
       switch (state) {
         case CREATE_TABLE_PRE_OPERATION:
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index 18c194f..29c9fc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.TableStateManager;
@@ -109,8 +108,8 @@ public class DisableTableProcedure
           setNextState(DisableTableState.DISABLE_TABLE_ADD_REPLICATION_BARRIER);
           break;
         case DISABLE_TABLE_ADD_REPLICATION_BARRIER:
-          if (env.getMasterServices().getTableDescriptors().get(tableName)
-            .hasGlobalReplicationScope()) {
+          if (env.getMasterServices().getTableDescriptors().get(tableName).
+              hasGlobalReplicationScope()) {
             MasterFileSystem fs = env.getMasterFileSystem();
             try (BufferedMutator mutator = env.getMasterServices().getConnection()
               .getBufferedMutator(TableName.META_TABLE_NAME)) {
@@ -242,10 +241,7 @@ public class DisableTableProcedure
    */
   private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
     boolean canTableBeDisabled = true;
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table"));
-      canTableBeDisabled = false;
-    } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+    if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
       setFailure("master-disable-table", new TableNotFoundException(tableName));
       canTableBeDisabled = false;
     } else if (!skipTableStateCheck) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
index 06d6a2c..6b60b93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,23 +20,25 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import org.apache.hadoop.hbase.Cell;
+import java.util.concurrent.ExecutionException;
+
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.AsyncTable;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -45,6 +47,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.EnableTableState;
 
+
 @InterfaceAudience.Private
 public class EnableTableProcedure
     extends AbstractStateMachineTableProcedure<EnableTableState> {
@@ -57,7 +60,8 @@ public class EnableTableProcedure
 
   /**
    * Constructor
-   * @param env MasterProcedureEnv
+   *
+   * @param env       MasterProcedureEnv
    * @param tableName the table to operate on
    */
   public EnableTableProcedure(MasterProcedureEnv env, TableName tableName) {
@@ -66,7 +70,8 @@ public class EnableTableProcedure
 
   /**
    * Constructor
-   * @param env MasterProcedureEnv
+   *
+   * @param env       MasterProcedureEnv
    * @param tableName the table to operate on
    */
   public EnableTableProcedure(MasterProcedureEnv env, TableName tableName,
@@ -99,66 +104,53 @@ public class EnableTableProcedure
           setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
           break;
         case ENABLE_TABLE_MARK_REGIONS_ONLINE:
-          Connection connection = env.getMasterServices().getConnection();
-          // we will need to get the tableDescriptor here to see if there is a change in the replica
-          // count
-          TableDescriptor hTableDescriptor =
+          // Get the region replica count. If changed since disable, need to do
+          // more work assigning.
+          AsyncClusterConnection connection = env.getMasterServices().getAsyncClusterConnection();
+          TableDescriptor tableDescriptor =
               env.getMasterServices().getTableDescriptors().get(tableName);
-
-          // Get the replica count
-          int regionReplicaCount = hTableDescriptor.getRegionReplication();
-
-          // Get the regions for the table from memory; get both online and offline regions
-          // ('true').
+          int configuredReplicaCount = tableDescriptor.getRegionReplication();
+          // Get regions for the table from memory; get both online and offline regions ('true').
           List<RegionInfo> regionsOfTable =
               env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true);
 
-          int currentMaxReplica = 0;
-          // Check if the regions in memory have replica regions as marked in META table
-          for (RegionInfo regionInfo : regionsOfTable) {
-            if (regionInfo.getReplicaId() > currentMaxReplica) {
-              // Iterating through all the list to identify the highest replicaID region.
-              // We can stop after checking with the first set of regions??
-              currentMaxReplica = regionInfo.getReplicaId();
-            }
-          }
-
-          // read the META table to know the actual number of replicas for the table - if there
-          // was a table modification on region replica then this will reflect the new entries also
-          int replicasFound =
-              getNumberOfReplicasFromMeta(connection, regionReplicaCount, regionsOfTable);
-          assert regionReplicaCount - 1 == replicasFound;
-          LOG.info(replicasFound + " META entries added for the given regionReplicaCount "
-              + regionReplicaCount + " for the table " + tableName.getNameAsString());
-          if (currentMaxReplica == (regionReplicaCount - 1)) {
+          // How many replicas do we currently have? Check regions returned from
+          // in-memory state.
+          int currentMaxReplica = getMaxReplicaId(regionsOfTable);
+
+          // Read the META table to know the number of replicas the table currently has.
+          // If there was a table modification on region replica count then need to
+          // adjust replica counts here.
+          int replicasFound = getReplicaCount(connection, this.tableName);
+          LOG.info("replicasFound={} (configuredReplicaCount={}) for {}", replicasFound,
+              configuredReplicaCount, tableName.getNameAsString());
+          if (currentMaxReplica == (configuredReplicaCount - 1)) {
             if (LOG.isDebugEnabled()) {
-              LOG.debug("There is no change to the number of region replicas."
-                  + " Assigning the available regions." + " Current and previous"
-                  + "replica count is " + regionReplicaCount);
+              LOG.debug("No change in number of region replicas (configuredReplicaCount={});"
+                  + " assigning.", configuredReplicaCount);
             }
-          } else if (currentMaxReplica > (regionReplicaCount - 1)) {
-            // we have additional regions as the replica count has been decreased. Delete
+          } else if (currentMaxReplica > (configuredReplicaCount - 1)) {
+            // We have additional regions as the replica count has been decreased. Delete
             // those regions because already the table is in the unassigned state
             LOG.info("The number of replicas " + (currentMaxReplica + 1)
-                + "  is more than the region replica count " + regionReplicaCount);
-            List<RegionInfo> copyOfRegions = new ArrayList<RegionInfo>(regionsOfTable);
+                + "  is more than the region replica count " + configuredReplicaCount);
+            List<RegionInfo> copyOfRegions = new ArrayList<>(regionsOfTable);
             for (RegionInfo regionInfo : copyOfRegions) {
-              if (regionInfo.getReplicaId() > (regionReplicaCount - 1)) {
+              if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) {
                 // delete the region from the regionStates
                 env.getAssignmentManager().getRegionStates().deleteRegion(regionInfo);
                 // remove it from the list of regions of the table
-                LOG.info("The regioninfo being removed is " + regionInfo + " "
-                    + regionInfo.getReplicaId());
+                LOG.info("Removed replica={} of {}", regionInfo.getReplicaId(), regionInfo);
                 regionsOfTable.remove(regionInfo);
               }
             }
           } else {
             // the replicasFound is less than the regionReplication
-            LOG.info("The number of replicas has been changed(increased)."
-                + " Lets assign the new region replicas. The previous replica count was "
-                + (currentMaxReplica + 1) + ". The current replica count is " + regionReplicaCount);
-            regionsOfTable = RegionReplicaUtil.addReplicas(hTableDescriptor, regionsOfTable,
-              currentMaxReplica + 1, regionReplicaCount);
+            LOG.info("Number of replicas has increased. Assigning new region replicas. " +
+                    "The previous replica count was {}. The current replica count is {}.",
+                (currentMaxReplica + 1), configuredReplicaCount);
+            regionsOfTable = RegionReplicaUtil.addReplicas(tableDescriptor, regionsOfTable,
+                currentMaxReplica + 1, configuredReplicaCount);
           }
           // Assign all the table regions. (including region replicas if added).
           // createAssignProcedure will try to retain old assignments if possible.
@@ -177,38 +169,30 @@ public class EnableTableProcedure
       }
     } catch (IOException e) {
       if (isRollbackSupported(state)) {
-        setFailure("master-enable-table", e);
+        setFailure("master-enable-table", getCause(e));
       } else {
-        LOG.warn(
-          "Retriable error trying to enable table=" + tableName + " (in state=" + state + ")", e);
+        LOG.warn("Retryable error enabling {}, state={}", tableName, state, getCause(e));
       }
     }
     return Flow.HAS_MORE_STATE;
   }
 
-  private int getNumberOfReplicasFromMeta(Connection connection, int regionReplicaCount,
-      List<RegionInfo> regionsOfTable) throws IOException {
-    Result r = getRegionFromMeta(connection, regionsOfTable);
-    int replicasFound = 0;
-    for (int i = 1; i < regionReplicaCount; i++) {
-      // Since we have already added the entries to the META we will be getting only that here
-      List<Cell> columnCells =
-          r.getColumnCells(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i));
-      if (!columnCells.isEmpty()) {
-        replicasFound++;
-      }
-    }
-    return replicasFound;
+  /**
+   * @return The underlying cause if <code>e</code> is an ExecutionException, else <code>e</code> itself.
+   */
+  private Throwable getCause(Exception e) {
+    return e instanceof ExecutionException? ((ExecutionException)e).getCause(): e;
   }
 
-  private Result getRegionFromMeta(Connection connection, List<RegionInfo> regionsOfTable)
-      throws IOException {
-    byte[] metaKeyForRegion = MetaTableAccessor.getMetaKeyForRegion(regionsOfTable.get(0));
-    Get get = new Get(metaKeyForRegion);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    Table metaTable = MetaTableAccessor.getMetaHTable(connection);
-    Result r = metaTable.get(get);
-    return r;
+  /**
+   * @return Replica count read via the meta table's region locator; for the
+   *   hbase:meta table this goes to the registry implementation, which is what we want.
+   */
+  private int getReplicaCount(AsyncConnection connection, TableName tableName)
+      throws IOException, InterruptedException {
+    AsyncTable t = connection.getTable(TableName.META_TABLE_NAME);
+    List<HRegionLocation> rls =
+        FutureUtils.get(t.getRegionLocator().getRegionLocations(HConstants.EMPTY_START_ROW, true));
+    return rls.size();
   }
 
   @Override
@@ -408,4 +392,19 @@ public class EnableTableProcedure
       }
     }
   }
+
+  /**
+   * @return Maximum region replica id found in passed list of regions.
+   */
+  private static int getMaxReplicaId(List<RegionInfo> regions) {
+    int max = 0;
+    for (RegionInfo regionInfo: regions) {
+      if (regionInfo.getReplicaId() > max) {
+        // Iterate through the whole list to find the highest replicaId.
+        // TODO: could we stop after checking the first set of regions?
+        max = regionInfo.getReplicaId();
+      }
+    }
+    return max;
+  }
 }
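
What the new getReplicaCount leans on, as a client-side sketch assuming the
two-argument getRegionLocations overload the patch itself calls: one
HRegionLocation comes back per replica, and for hbase:meta the lookup goes
through the connection registry rather than through meta itself:

  import java.util.List;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.AsyncConnection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public final class ReplicaCountSketch {
    public static void main(String[] args) throws Exception {
      try (AsyncConnection conn =
          ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
        // One location per replica of the region holding the empty start row;
        // 'true' forces a reload instead of trusting the client cache.
        List<HRegionLocation> locations = conn.getTable(TableName.META_TABLE_NAME)
            .getRegionLocator()
            .getRegionLocations(HConstants.EMPTY_START_ROW, true)
            .get();
        System.out.println("hbase:meta replica count: " + locations.size());
      }
    }
  }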
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 755e0a3..6ab0077 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -23,10 +23,10 @@ import java.util.HashMap;
 import java.util.Map;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.TableStateManager;
-import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
@@ -140,7 +140,7 @@ public abstract class AbstractPeerProcedure<TState> extends AbstractPeerNoLockPr
           return true;
         }
         Thread.sleep(SLEEP_INTERVAL_MS);
-      } catch (TableStateNotFoundException e) {
+      } catch (TableNotFoundException e) {
         return false;
       } catch (InterruptedException e) {
         throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index c4df613..d739b6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,10 +20,10 @@ package org.apache.hadoop.hbase.master.replication;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.TableStateManager;
-import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
@@ -125,7 +125,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
           return false;
         }
         Thread.sleep(SLEEP_INTERVAL_MS);
-      } catch (TableStateNotFoundException e) {
+      } catch (TableNotFoundException e) {
         return false;
       } catch (InterruptedException e) {
         throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java
index eb80a2a..98d7322 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.java
@@ -1,4 +1,4 @@
-/**
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -41,6 +41,6 @@ public class MetaLocationSyncer extends ClientZKSyncer {
 
   @Override
   Collection<String> getNodesToWatch() {
-    return watcher.getZNodePaths().metaReplicaZNodes.values();
+    return watcher.getZNodePaths().getMetaReplicaZNodes();
   }
-}
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index a0a6b4c..ebe8506 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -744,9 +744,14 @@ public class HRegionServer extends HasThread implements
 
   protected TableDescriptors getFsTableDescriptors() throws IOException {
     return new FSTableDescriptors(this.conf,
-      this.fs, this.rootDir, !canUpdateTableDescriptor(), false, getMetaTableObserver());
+      this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
   }
 
+  /**
+   * @deprecated Since 2.3.0. Not needed anymore. Was used by the Master to pass in the
+   *   replication setting on hbase:meta construction. To be removed in hbase 4.
+   */
+  @Deprecated
   protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
     return null;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java
index 9ad0af2..9202133 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java
@@ -1,4 +1,6 @@
-/**
+/*
+ * Copyright The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,6 +24,7 @@ import java.util.function.BiPredicate;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.yetus.audience.InterfaceAudience;
 
+
 /**
  * Check whether we need to reject the replication request from source cluster.
  */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 8ae5b4b..a6a4a16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -24,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -99,10 +98,7 @@ public class FSTableDescriptors implements TableDescriptors {
   // TODO.
   private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();
 
-  /**
-   * Table descriptor for <code>hbase:meta</code> catalog table
-   */
-  private final TableDescriptor metaTableDescriptor;
+  private final Configuration configuration;
 
   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
@@ -123,46 +119,35 @@ public class FSTableDescriptors implements TableDescriptors {
    *                   operations; i.e. on remove, we do not do delete in fs.
    */
   public FSTableDescriptors(final Configuration conf, final FileSystem fs,
-                            final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
-    this(conf, fs, rootdir, fsreadonly, usecache, null);
-  }
-
-  /**
-   * @param fsreadonly True if we are read-only when it comes to filesystem
-   *                   operations; i.e. on remove, we do not do delete in fs.
-   * @param metaObserver Used by HMaster. It need to modify the META_REPLICAS_NUM for meta table descriptor.
-   *                     see HMaster#finishActiveMasterInitialization
-   *                     TODO: This is a workaround. Should remove this ugly code...
-   */
-  public FSTableDescriptors(final Configuration conf, final FileSystem fs,
-                            final Path rootdir, final boolean fsreadonly, final boolean usecache,
-                            Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
+      final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
     this.fs = fs;
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
-          : metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
+    this.configuration = conf;
   }
 
-  @VisibleForTesting
-  public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
+  /**
+   * @return Default hbase:meta schema.
+   */
+  private TableDescriptor createMetaTableDescriptor()
+      throws IOException {
     // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
     // the META table data goes to File mode BC only. Test how that affect the system. If too much,
     // we have to rethink about adding back the setCacheDataInL1 for META table CFs.
     return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
       .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
-        .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+        .setMaxVersions(this.configuration.getInt(HConstants.HBASE_META_VERSIONS,
                 HConstants.DEFAULT_HBASE_META_VERSIONS))
         .setInMemory(true)
-        .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+        .setBlocksize(this.configuration.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                 HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
         .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
         // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
         .setBloomFilterType(BloomType.NONE)
         .build())
       .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY)
-        .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+        .setMaxVersions(this.configuration.getInt(HConstants.HBASE_META_VERSIONS,
             HConstants.DEFAULT_HBASE_META_VERSIONS))
         .setInMemory(true)
         .setBlocksize(8 * 1024)
@@ -180,10 +165,10 @@ public class FSTableDescriptors implements TableDescriptors {
         .build())
       .setColumnFamily(ColumnFamilyDescriptorBuilder
         .newBuilder(HConstants.NAMESPACE_FAMILY)
-        .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+        .setMaxVersions(this.configuration.getInt(HConstants.HBASE_META_VERSIONS,
                 HConstants.DEFAULT_HBASE_META_VERSIONS))
         .setInMemory(true)
-        .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+        .setBlocksize(this.configuration.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                 HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
         .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
         // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
@@ -191,13 +176,10 @@ public class FSTableDescriptors implements TableDescriptors {
         .build())
       .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(
         MultiRowMutationEndpoint.class.getName())
-        .setPriority(Coprocessor.PRIORITY_SYSTEM).build());
-  }
-
-  @VisibleForTesting
-  public static TableDescriptor createMetaTableDescriptor(final Configuration conf)
-      throws IOException {
-    return createMetaTableDescriptorBuilder(conf).build();
+        .setPriority(Coprocessor.PRIORITY_SYSTEM)
+        .build())
+        .setRegionReplication(this.configuration.getInt(HConstants.META_REPLICAS_NUM,
+            HConstants.DEFAULT_META_REPLICA_NUM)).build();
   }
 
   @Override
@@ -228,16 +210,11 @@ public class FSTableDescriptors implements TableDescriptors {
   public TableDescriptor get(final TableName tablename)
   throws IOException {
     invocations++;
-    if (TableName.META_TABLE_NAME.equals(tablename)) {
-      cachehits++;
-      return metaTableDescriptor;
-    }
-    // hbase:meta is already handled. If some one tries to get the descriptor for
+    // If someone tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
-       throw new IOException("No descriptor found for non table = " + tablename);
+      throw new IOException("No descriptor found for non table = " + tablename);
     }
-
     if (usecache) {
       // Look in cache of descriptors.
       TableDescriptor cachedtdm = this.cache.get(tablename);
@@ -246,24 +223,27 @@ public class FSTableDescriptors implements TableDescriptors {
         return cachedtdm;
       }
     }
-    TableDescriptor tdmt = null;
+    TableDescriptor td = null;
     try {
-      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
+      td = getTableDescriptorFromFs(fs, rootdir, tablename);
     } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + tablename, e);
+      LOG.debug("Exception during readTableDecriptor; tableName={}", tablename, e);
     } catch (TableInfoMissingException e) {
-      // ignore. This is regular operation
+      if (TableName.isMetaTableName(tablename)) {
+        // If we tried to access hbase:meta and it is not there, create it.
+        td = createMetaTableDescriptor();
+        LOG.info("Creating new hbase:meta table default descriptor/schema {}", td);
+      }
     } catch (IOException ioe) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
           + tablename, ioe);
     }
     // last HTD written wins
-    if (usecache && tdmt != null) {
-      this.cache.put(tablename, tdmt);
+    if (usecache && td != null) {
+      this.cache.put(tablename, td);
     }
 
-    return tdmt;
+    return td;
   }
 
   /**
@@ -273,16 +253,21 @@ public class FSTableDescriptors implements TableDescriptors {
   public Map<String, TableDescriptor> getAll()
   throws IOException {
     Map<String, TableDescriptor> tds = new TreeMap<>();
-
     if (fsvisited && usecache) {
+      if (this.cache.get(TableName.META_TABLE_NAME) == null) {
+        // This get will create hbase:meta if it does not exist. Will also populate cache.
+        get(TableName.META_TABLE_NAME);
+      }
       for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
         tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
       }
-      // add hbase:meta to the response
-      tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
     } else {
       LOG.trace("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
+      // Add hbase:meta descriptor. The get will create hbase:meta in fs if it doesn't
+      // exist. FSUtils listing of table names in fs skips meta dirs. TODO: Fill out
+      // FSUtils with methods to get userspace tables and system tables.
+      tds.put(TableName.META_TABLE_NAME.toString(), get(TableName.META_TABLE_NAME));
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
         TableDescriptor htd = null;
         try {
@@ -336,14 +321,9 @@ public class FSTableDescriptors implements TableDescriptors {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    TableName tableName = htd.getTableName();
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      throw new NotImplementedException(HConstants.NOT_IMPLEMENTED);
-    }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
-      throw new NotImplementedException(
-          "Cannot add a table descriptor for a reserved subdirectory name: "
-              + htd.getTableName().getNameAsString());
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
+      throw new NotImplementedException("Cannot add Descriptor for reserved subdirectory name: " +
+          htd.getTableName().getNameAsString());
     }
     updateTableDescriptor(htd);
   }
@@ -369,26 +349,6 @@ public class FSTableDescriptors implements TableDescriptors {
     return descriptor;
   }
 
-  /**
-   * Checks if a current table info file exists for the given table
-   *
-   * @param tableName name of table
-   * @return true if exists
-   * @throws IOException
-   */
-  public boolean isTableInfoExists(TableName tableName) throws IOException {
-    return getTableInfoPath(tableName) != null;
-  }
-
-  /**
-   * Find the most current table info file for the given table in the hbase root directory.
-   * @return The file status of the current table info file or null if it does not exist
-   */
-  private FileStatus getTableInfoPath(final TableName tableName) throws IOException {
-    Path tableDir = getTableDir(tableName);
-    return getTableInfoPath(tableDir);
-  }
-
   private FileStatus getTableInfoPath(Path tableDir)
   throws IOException {
     return getTableInfoPath(fs, tableDir, !fsreadonly);
@@ -403,7 +363,6 @@ public class FSTableDescriptors implements TableDescriptors {
    * were sequence numbers).
    *
    * @return The file status of the current table info file or null if it does not exist
-   * @throws IOException
    */
   public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir)
   throws IOException {
@@ -421,7 +380,6 @@ public class FSTableDescriptors implements TableDescriptors {
    * older files.
    *
    * @return The file status of the current table info file or null if none exist
-   * @throws IOException
    */
   private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles)
   throws IOException {
@@ -610,21 +568,6 @@ public class FSTableDescriptors implements TableDescriptors {
   }
 
   /**
-   * Deletes all the table descriptor files from the file system.
-   * Used in unit tests only.
-   * @throws NotImplementedException if in read only mode
-   */
-  public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
-    if (fsreadonly) {
-      throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
-    }
-
-    Path tableDir = getTableDir(tableName);
-    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
-    deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
-  }
-
-  /**
    * Deletes files matching the table info file pattern within the given directory
    * whose sequenceId is at most the given max sequenceId.
    */
@@ -760,6 +703,27 @@ public class FSTableDescriptors implements TableDescriptors {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }
+    return createTableDescriptorForTableDirectory(this.fs, tableDir, htd, forceCreation);
+  }
+
+  /**
+   * Create a new TableDescriptor in the specified table directory and filesystem. Happens when we
+   * create a new table or snapshot a table. This method doesn't require creation of an
+   * {@link FSTableDescriptors} instance so it takes a bunch of arguments. Users of the method
+   * above used to create an FSTableDescriptors instance just to run the method. That was fine
+   * until construction started expecting to be able to read the hbase:meta schema. Snapshotting
+   * to some random dir would fail construction if no hbase:meta schema was available.
+   * @param fs Filesystem to write to. Snapshot can set it to other than that of running system.
+   * @param tableDir table directory under which we should write the file
+   * @param htd description of the table to write
+   * @param forceCreation if <tt>true</tt>, then even if a previous table descriptor is present it
+   *          will be overwritten
+   * @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
+   *         already exists and we weren't forcing the descriptor creation.
+   * @throws IOException if a filesystem error occurs
+   */
+  public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tableDir,
+      TableDescriptor htd, boolean forceCreation) throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir);
     if (status != null) {
       LOG.debug("Current path=" + status.getPath());
@@ -775,6 +739,5 @@ public class FSTableDescriptors implements TableDescriptors {
     Path p = writeTableDescriptor(fs, htd, tableDir, status);
     return p != null;
   }
-
 }
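
The two behavioral changes in this file are easiest to see side by side: the
hbase:meta schema is now assembled lazily by get() from the Configuration, and
the descriptor-writing path is reachable as a static without constructing an
FSTableDescriptors. A minimal sketch, assuming a caller that supplies the
hbase rootdir; the directory names are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public final class MetaDescriptorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = new Path(args[0]); // assumption: hbase rootdir from the caller

        // Construction no longer needs to read or build the hbase:meta
        // schema; get() creates the default descriptor on demand, pulling
        // region replication et al. from the Configuration.
        FSTableDescriptors tds =
            new FSTableDescriptors(conf, fs, rootDir, true /* fsreadonly */, false /* usecache */);
        TableDescriptor meta = tds.get(TableName.META_TABLE_NAME);
        System.out.println("hbase:meta schema: " + meta);

        // Snapshot-style callers can write a descriptor to an arbitrary
        // directory without an FSTableDescriptors instance at all.
        Path snapshotDir = new Path(rootDir, ".tmp-snapshot"); // hypothetical target dir
        boolean wrote = FSTableDescriptors.createTableDescriptorForTableDirectory(
            fs, snapshotDir, meta, false /* forceCreation */);
        System.out.println("descriptor written: " + wrote);
      }
    }
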
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index c5f6d32..b41e9b5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -121,7 +121,6 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@@ -495,17 +494,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   }
 
   /**
-   * @return META table descriptor
-   */
-  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
-    try {
-      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
-    } catch (IOException e) {
-      throw new RuntimeException("Unable to create META table descriptor", e);
-    }
-  }
-
-  /**
    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
    * to write temporary test data. Call this method after setting up the mini dfs cluster
    * if the test relies on it.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java
new file mode 100644
index 0000000..d201ed0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+
+/**
+ * Test being able to edit hbase:meta.
+ */
+@Category({MiscTests.class, LargeTests.class})
+public class TestHBaseMetaEdit {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestHBaseMetaEdit.class);
+  @Rule
+  public TestName name = new TestName();
+  private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @Before
+  public void before() throws Exception {
+    UTIL.startMiniCluster();
+  }
+
+  @After
+  public void after() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Set versions, set HBASE-16213 indexed block encoding, and add a column family.
+   * Verify they are all in place by looking at TableDescriptor AND by checking
+   * what the RegionServer sees after opening Region.
+   */
+  @Test
+  public void testEditMeta() throws IOException {
+    Admin admin = UTIL.getAdmin();
+    admin.disableTable(TableName.META_TABLE_NAME);
+    TableDescriptor descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    ColumnFamilyDescriptor cfd = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
+    byte [] extraColumnFamilyName = Bytes.toBytes("xtra");
+    ColumnFamilyDescriptor newCfd =
+        ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
+    int oldVersions = cfd.getMaxVersions();
+    // Add '1' to current versions count.
+    cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(oldVersions + 1).
+        setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING,
+            DataBlockEncoding.ROW_INDEX_V1.toString()).build();
+    admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd);
+    admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
+    descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    // Assert new max versions is == old versions plus 1.
+    assertEquals(oldVersions + 1,
+        descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
+    admin.enableTable(TableName.META_TABLE_NAME);
+    descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    // Assert new max versions is == old versions plus 1.
+    assertEquals(oldVersions + 1,
+        descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
+    assertTrue(descriptor.getColumnFamily(newCfd.getName()) != null);
+    String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration().
+        get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
+    assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
+    Region r = UTIL.getHBaseCluster().getRegionServer(0).
+        getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
+    assertEquals(oldVersions + 1,
+        r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
+    encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().
+        getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
+    assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
+    assertTrue(r.getStore(extraColumnFamilyName) != null);
+  }
+}
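
The test above is the canonical example; condensed against a live cluster it
amounts to the sketch below. This assumes the behavior of this branch, where
disabling hbase:meta no longer throws ConstraintException.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class AlterMetaSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Take hbase:meta offline so its schema can be altered.
          admin.disableTable(TableName.META_TABLE_NAME);
          ColumnFamilyDescriptor info = admin.getDescriptor(TableName.META_TABLE_NAME)
              .getColumnFamily(HConstants.CATALOG_FAMILY);
          // Bump versions on the catalog family, mirroring the test above.
          admin.modifyColumnFamily(TableName.META_TABLE_NAME,
              ColumnFamilyDescriptorBuilder.newBuilder(info)
                  .setMaxVersions(info.getMaxVersions() + 1).build());
          admin.enableTable(TableName.META_TABLE_NAME);
        }
      }
    }
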
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 55b85bb..d723db1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.Waiter.Predicate;
-import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -505,22 +504,6 @@ public class TestAdmin2 extends TestAdminBase {
   }
 
   @Test
-  public void testDisableCatalogTable() throws Exception {
-    try {
-      ADMIN.disableTable(TableName.META_TABLE_NAME);
-      fail("Expected to throw ConstraintException");
-    } catch (ConstraintException e) {
-    }
-    // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
-    // actually getting disabled by the disableTable() call.
-    HTableDescriptor htd =
-        new HTableDescriptor(TableName.valueOf(Bytes.toBytes(name.getMethodName())));
-    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf1"));
-    htd.addFamily(hcd);
-    TEST_UTIL.getAdmin().createTable(htd);
-  }
-
-  @Test
   public void testIsEnabledOrDisabledOnUnknownTable() throws Exception {
     try {
       ADMIN.isTableEnabled(TableName.valueOf(name.getMethodName()));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAccessControlAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAccessControlAdminApi.java
index 9182e6f..33c8c10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAccessControlAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAccessControlAdminApi.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
  * agreements. See the NOTICE file distributed with this work for additional information regarding
  * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
index 4f5cbf2..cd20e10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
@@ -39,7 +39,6 @@ import java.util.Set;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 /**
  * Class to test asynchronous table admin operations
@@ -55,18 +54,6 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {
       HBaseClassTestRule.forClass(TestAsyncTableAdminApi2.class);
 
   @Test
-  public void testDisableCatalogTable() throws Exception {
-    try {
-      this.admin.disableTable(TableName.META_TABLE_NAME).join();
-      fail("Expected to throw ConstraintException");
-    } catch (Exception e) {
-    }
-    // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
-    // actually getting disabled by the disableTable() call.
-    createTableWithDefaultConf(tableName);
-  }
-
-  @Test
   public void testAddColumnFamily() throws Exception {
     // Create a table with two families
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
index d4bb3be..c0b211b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
@@ -18,12 +18,9 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -32,7 +29,6 @@ import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
@@ -201,14 +197,6 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
       ok = false;
     }
     assertTrue(ok);
-    // meta table can not be disabled.
-    try {
-      admin.disableTable(TableName.META_TABLE_NAME).get();
-      fail("meta table can not be disabled");
-    } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      assertThat(cause, instanceOf(DoNotRetryIOException.class));
-    }
   }
 
   @Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 55fc289..9b533c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -147,6 +147,13 @@ public class TestMetaWithReplicas {
   public void testMetaHTDReplicaCount() throws Exception {
     assertEquals(3,
       TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication());
+    try (AsyncConnection connection =
+             ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) {
+      AsyncTable t = connection.getTable(TableName.META_TABLE_NAME);
+      List<HRegionLocation> rls =
+          t.getRegionLocator().getRegionLocations(HConstants.EMPTY_START_ROW, true).get();
+      assertEquals(3, rls.size());
+    }
   }
 
   @Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
index 189fd2e..128a247 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 77f796f..1489c17 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -949,7 +949,7 @@ public class TestDefaultMemStore {
     edge.setCurrentTimeMillis(1234);
     WALFactory wFactory = new WALFactory(conf, "1234");
     HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir,
-        conf, FSTableDescriptors.createMetaTableDescriptor(conf),
+        conf, (new FSTableDescriptors(conf)).get(TableName.META_TABLE_NAME),
         wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
     // parameterized tests add [#] suffix get rid of [ and ].
     TableDescriptor desc = TableDescriptorBuilder
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 3da0341..419c1b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -37,12 +38,15 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -87,8 +91,9 @@ public class TestGetClosestAtOrBefore  {
   public void testUsingMetaAndBinary() throws IOException {
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = UTIL.getDataTestDirOnTestFS();
+
     // Up flush size else we bind up when we use default catalog flush of 16k.
-    TableDescriptorBuilder metaBuilder = UTIL.getMetaTableDescriptorBuilder()
+    TableDescriptorBuilder metaBuilder = createMetaTableDescriptorBuilder(UTIL.getConfiguration())
             .setMemStoreFlushSize(64 * 1024 * 1024);
 
     HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
@@ -368,6 +373,56 @@ public class TestGetClosestAtOrBefore  {
       }
     }
   }
-
+  private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
+      throws IOException {
+    // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
+    // the META table data goes to File mode BC only. Test how that affect the system. If too much,
+    // we have to rethink about adding back the setCacheDataInL1 for META table CFs.
+    return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
+            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                HConstants.DEFAULT_HBASE_META_VERSIONS))
+            .setInMemory(true)
+            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            .build())
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY)
+            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                HConstants.DEFAULT_HBASE_META_VERSIONS))
+            .setInMemory(true)
+            .setBlocksize(8 * 1024)
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            .build())
+        .setColumnFamily(ColumnFamilyDescriptorBuilder
+            .newBuilder(HConstants.REPLICATION_BARRIER_FAMILY)
+            .setMaxVersions(HConstants.ALL_VERSIONS)
+            .setInMemory(true)
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            .build())
+        .setColumnFamily(ColumnFamilyDescriptorBuilder
+            .newBuilder(HConstants.NAMESPACE_FAMILY)
+            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                HConstants.DEFAULT_HBASE_META_VERSIONS))
+            .setInMemory(true)
+            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+            .setBloomFilterType(BloomType.NONE)
+            .build())
+        .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(
+            MultiRowMutationEndpoint.class.getName())
+            .setPriority(Coprocessor.PRIORITY_SYSTEM)
+            .build())
+        .setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM,
+            HConstants.DEFAULT_META_REPLICA_NUM));
+  }
 }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
index 819df67..133ed9c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -77,8 +78,6 @@ public class TestLogRollingNoCluster {
   /**
    * Spin up a bunch of threads and have them all append to a WAL.  Roll the
    * WAL frequently to try and trigger NPE.
-   * @throws IOException
-   * @throws InterruptedException
    */
   @Test
   public void testContendedLogRolling() throws Exception {
@@ -161,7 +160,8 @@ public class TestLogRollingNoCluster {
           byte[] bytes = Bytes.toBytes(i);
           edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
           RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
-          TableDescriptor htd = TEST_UTIL.getMetaTableDescriptorBuilder().build();
+          TableDescriptor htd =
+              new FSTableDescriptors(TEST_UTIL.getConfiguration()).get(TableName.META_TABLE_NAME);
           NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
           for(byte[] fam : htd.getColumnFamilyNames()) {
             scopes.put(fam, 0);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index a0c37f2..fe68948 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -322,7 +322,9 @@ public class TestFSTableDescriptors {
     }
 
     Map<String, TableDescriptor> tables = tds.getAll();
-    assertEquals(4, tables.size());
+    assertEquals(5, tables.size());
+    // Remove because it messes up below order test.
+    tables.remove(TableName.META_TABLE_NAME.toString());
 
     String[] tableNamesOrdered =
         new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" };
@@ -368,6 +370,9 @@ public class TestFSTableDescriptors {
 
     for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
       String t = (String) entry.getKey();
+      if (t.equals(TableName.META_TABLE_NAME.toString())) {
+        continue;
+      }
       TableDescriptor nchtd = entry.getValue();
       assertTrue("expected " + htd.toString() +
                    " got: " + chtds.get(TableName.valueOf(t)).toString(),
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index 0cebc76..1183cd6 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -62,14 +62,6 @@ public final class MetaTableLocator {
   }
 
   /**
-   * Checks if the meta region location is available.
-   * @return true if meta region location is available, false if not
-   */
-  public static boolean isLocationAvailable(ZKWatcher zkw) {
-    return getMetaRegionLocation(zkw) != null;
-  }
-
-  /**
    * @param zkw ZooKeeper watcher to be used
    * @return meta table regions and their locations.
    */
@@ -266,7 +258,7 @@ public final class MetaTableLocator {
   }
 
   /**
-   * Load the meta region state from the meta server ZNode.
+   * Load the meta region state from the meta region server ZNode.
    *
    * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
    * @param replicaId the ID of the replica
@@ -306,10 +298,8 @@ public final class MetaTableLocator {
     if (serverName == null) {
       state = RegionState.State.OFFLINE;
     }
-    return new RegionState(
-        RegionReplicaUtil.getRegionInfoForReplica(
-            RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId),
-        state, serverName);
+    return new RegionState(RegionReplicaUtil.getRegionInfoForReplica(
+        RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName);
   }
 
   /**
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index 878f3ca..2e3e4b0 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -2057,7 +2057,7 @@ public final class ZKUtil {
       " byte(s) of data from znode " + znode +
       (watcherSet? " and set watcher; ": "; data=") +
       (data == null? "null": data.length == 0? "empty": (
-          znode.startsWith(zkw.getZNodePaths().metaZNodePrefix)?
+          zkw.getZNodePaths().isMetaZNodePrefix(znode)?
             getServerNameOrEmptyString(data):
           znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)?
             getServerNameOrEmptyString(data):
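
The last two hunks replace direct field reads with ZNodePaths accessors
(getMetaReplicaZNodes() in MetaLocationSyncer, isMetaZNodePrefix() in ZKUtil).
A small sketch of how those accessors are used; the watcher identifier and the
znode path literal are assumptions for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

    public final class ZNodePathsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Abortable noop = new Abortable() {
          @Override public void abort(String why, Throwable e) { }
          @Override public boolean isAborted() { return false; }
        };
        try (ZKWatcher zkw = new ZKWatcher(conf, "znode-paths-sketch", noop)) {
          // Accessor replacing direct reads of the metaReplicaZNodes map.
          System.out.println(zkw.getZNodePaths().getMetaReplicaZNodes());
          // Accessor replacing the metaZNodePrefix startsWith() check.
          System.out.println(zkw.getZNodePaths().isMetaZNodePrefix("/hbase/meta-region-server"));
        }
      }
    }
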